initial release
parent abbfcabe8d
commit dd7e923bff

11 changed files with 1234 additions and 0 deletions

asar/asar.go (new file, 380 additions)
@@ -0,0 +1,380 @@
package asar

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"path/filepath"
	"strconv"
	"strings"

	"vaclive.party/software/lychee-slicer/asar/pickle"
)

type EntryMetadata struct {
	Unpacked *bool `json:"unpacked,omitempty"`
}

type FilesystemDirectoryEntry struct {
	EntryMetadata
	Files    map[string]FilesystemEntry
	nezuMeta *map[string]string
}

func (f *FilesystemDirectoryEntry) UnmarshalJSON(data []byte) error {
	var raw struct {
		Unpacked *bool                       `json:"unpacked"`
		Files    *map[string]json.RawMessage `json:"files"`
		NezuMeta *map[string]string          `json:"nezuMeta,omitempty"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	if raw.Files == nil {
		return fmt.Errorf("missing files")
	}
	f.Unpacked = raw.Unpacked
	f.Files = make(map[string]FilesystemEntry)
	f.nezuMeta = raw.NezuMeta
	for name, entry := range *raw.Files {
		var dir FilesystemDirectoryEntry
		if err := json.Unmarshal(entry, &dir); err == nil {
			f.Files[name] = &dir
			continue
		}

		var file FilesystemFileEntry
		if err := json.Unmarshal(entry, &file); err == nil && file.Size != nil {
			f.Files[name] = &file
			continue
		}

		var link FilesystemLinkEntry
		if err := json.Unmarshal(entry, &link); err == nil && link.Link != nil {
			f.Files[name] = &link
			continue
		}

		return fmt.Errorf("unexpected entry type: %s", name)
	}
	return nil
}

func (f *FilesystemDirectoryEntry) MarshalJSON() ([]byte, error) {
	raw := struct {
		Unpacked *bool                      `json:"unpacked,omitempty"`
		Files    map[string]json.RawMessage `json:"files"`
		NezuMeta *map[string]string         `json:"nezuMeta,omitempty"`
	}{
		Unpacked: f.Unpacked,
		Files:    make(map[string]json.RawMessage),
		NezuMeta: f.nezuMeta,
	}
	for name, entry := range f.Files {
		data, err := json.Marshal(entry)
		if err != nil {
			return nil, err
		}
		raw.Files[name] = data
	}
	return json.Marshal(raw)
}

type FilesystemFileEntry struct {
	EntryMetadata
	Executable *bool          `json:"executable,omitempty"`
	Offset     *string        `json:"offset,omitempty"`
	Size       *int64         `json:"size"`
	Integrity  *FileIntegrity `json:"integrity,omitempty"`
}

type FilesystemLinkEntry struct {
	EntryMetadata
	Link *string `json:"link"`
}

type FilesystemEntry interface{}

type Filesystem struct {
	storage FilesystemStorage
	header  *FilesystemDirectoryEntry
}

func newFileSystemDirectoryEntry() *FilesystemDirectoryEntry {
	return &FilesystemDirectoryEntry{Files: make(map[string]FilesystemEntry)}
}

func newFileSystemFileEntry() *FilesystemFileEntry {
	return &FilesystemFileEntry{}
}

func newFileSystemLinkEntry() *FilesystemLinkEntry {
	return &FilesystemLinkEntry{}
}

func NewFilesystem() *Filesystem {
	return &Filesystem{
		storage: *NewFilesystemStorage(bytes.NewReader([]byte{}), NewBytesReadAtWriter([]byte{})),
		header:  newFileSystemDirectoryEntry(),
	}
}

func OpenFilesystem(reader io.ReaderAt, readerSize int64) (*Filesystem, error) {
	sizeBytes := make([]byte, 8)
	if _, err := reader.ReadAt(sizeBytes, 0); err != nil {
		return nil, err
	}

	sizePickle := pickle.CreateFromBuffer(sizeBytes)
	headerSize := sizePickle.CreateIterator().ReadUInt32()

	headerBytes := make([]byte, headerSize)
	if _, err := reader.ReadAt(headerBytes, 8); err != nil {
		return nil, err
	}

	headerPickle := pickle.CreateFromBuffer(headerBytes)
	headerJson := headerPickle.CreateIterator().ReadString()

	var header FilesystemDirectoryEntry
	if err := json.Unmarshal([]byte(headerJson), &header); err != nil {
		return nil, err
	}

	sectionReader := io.NewSectionReader(reader, 8+int64(headerSize), readerSize-8-int64(headerSize))

	return &Filesystem{
		storage: *NewFilesystemStorage(sectionReader, NewBytesReadAtWriter([]byte{})),
		header:  &header,
	}, nil
}

func (fs *Filesystem) searchNodeFromDirectory(p string) (*FilesystemDirectoryEntry, error) {
	entry := fs.header
	// split on both separators so Windows-style paths are handled too
	dirs := strings.FieldsFunc(p, func(r rune) bool { return r == '/' || r == '\\' })
	for _, dir := range dirs {
		if dir == "." {
			continue
		}
		if entry.Files == nil {
			return nil, fmt.Errorf("unexpected directory state while traversing: %s", p)
		}
		if _, ok := entry.Files[dir]; !ok {
			entry.Files[dir] = newFileSystemDirectoryEntry()
		}
		dirEntry, ok := entry.Files[dir].(*FilesystemDirectoryEntry)
		if !ok {
			return nil, fmt.Errorf("not a directory: %s", dir)
		}
		entry = dirEntry
	}
	return entry, nil
}

func (fs *Filesystem) searchNodeFromPath(p string) (FilesystemEntry, error) {
	if p == "." {
		return fs.header, nil
	}
	name := filepath.Base(p)
	dirNode, err := fs.searchNodeFromDirectory(filepath.Dir(p))
	if err != nil {
		return nil, err
	}
	if dirNode.Files == nil {
		dirNode.Files = make(map[string]FilesystemEntry)
	}
	if _, ok := dirNode.Files[name]; !ok {
		dirNode.Files[name] = newFileSystemFileEntry()
	}
	return dirNode.Files[name], nil
}

func (fs *Filesystem) GetMetaFlag(flag string) string {
	if fs.header.nezuMeta != nil {
		if value, ok := (*fs.header.nezuMeta)[flag]; ok {
			return value
		}
	}
	return ""
}

func (fs *Filesystem) SetMetaFlag(flag string, value string) error {
	if fs.header.nezuMeta == nil {
		fs.header.nezuMeta = &map[string]string{}
	}
	(*fs.header.nezuMeta)[flag] = value
	return nil
}

func (fs *Filesystem) InsertDirectory(p string) (map[string]FilesystemEntry, error) {
	node, err := fs.searchNodeFromPath(p)
	if err != nil {
		return nil, err
	}
	dirNode, ok := node.(*FilesystemDirectoryEntry)
	if !ok {
		return nil, fmt.Errorf("not a directory: %s", p)
	}
	return dirNode.Files, nil
}

func (fs *Filesystem) InsertFile(p string, data []byte, executable bool) error {
	size := int64(len(data))
	if size > math.MaxUint32 {
		return fmt.Errorf("%s: file size exceeds 4GB", p)
	}

	dirNode, err := fs.searchNodeFromDirectory(filepath.Dir(p))
	if err != nil {
		return err
	}
	if dirNode.Unpacked != nil && *dirNode.Unpacked {
		return fmt.Errorf("unpacked directories are not supported: %s", p)
	}

	node, err := fs.searchNodeFromPath(p)
	if err != nil {
		return err
	}
	fileNode, ok := node.(*FilesystemFileEntry)
	if !ok {
		return fmt.Errorf("not a file: %s", p)
	}

	fileNode.Size = &size
	offset := fmt.Sprintf("%d", fs.storage.Size())
	fileNode.Offset = &offset
	fileNode.Integrity = getFileIntegrity(data)
	if executable {
		fileNode.Executable = &executable
	}
	if _, err := fs.storage.Write(data); err != nil {
		return err
	}
	return nil
}

func (fs *Filesystem) ReadFile(p string, followLinks bool) ([]byte, error) {
	file, err := fs.GetFile(p, followLinks)
	if err != nil {
		return nil, err
	}
	if file == nil {
		return nil, fmt.Errorf("file not found: %s", p)
	}
	if linkEntry, ok := file.(*FilesystemLinkEntry); ok {
		return []byte(*linkEntry.Link), nil
	}

	fileEntry := file.(*FilesystemFileEntry)
	if fileEntry.Unpacked != nil && *fileEntry.Unpacked {
		return nil, fmt.Errorf("unpacked files are not supported: %s", p)
	}

	offset, err := strconv.ParseInt(*fileEntry.Offset, 10, 64)
	if err != nil {
		return nil, err
	}

	ret := make([]byte, *fileEntry.Size)
	if _, err := fs.storage.ReadAt(ret, offset); err != nil {
		return nil, err
	}

	// FIXME: integrity check

	return ret, nil
}

func (fs *Filesystem) InsertLink(p string, target string) error {
	dirNode, err := fs.searchNodeFromDirectory(filepath.Dir(p))
	if err != nil {
		return err
	}
	if dirNode.Unpacked != nil && *dirNode.Unpacked {
		return fmt.Errorf("unpacked directories are not supported: %s", p)
	}

	name := filepath.Base(p)
	if _, ok := dirNode.Files[name]; !ok {
		dirNode.Files[name] = newFileSystemLinkEntry()
	}

	linkNode, ok := dirNode.Files[name].(*FilesystemLinkEntry)
	if !ok {
		return fmt.Errorf("not a link: %s", p)
	}
	linkNode.Link = &target
	return nil
}

func (fs *Filesystem) ListFiles() []string {
	files := []string{}

	// recursively traverse the filesystem tree
	var traverse func(string, FilesystemEntry)
	traverse = func(p string, entry FilesystemEntry) {
		switch e := entry.(type) {
		case *FilesystemDirectoryEntry:
			for name, child := range e.Files {
				path := name
				if p != "" {
					path = p + "/" + name
				}
				traverse(path, child)
			}
		case *FilesystemFileEntry:
			files = append(files, p)
		case *FilesystemLinkEntry:
			files = append(files, p)
		}
	}

	traverse("", fs.header)
	return files
}

func (fs *Filesystem) GetNode(p string) (FilesystemEntry, error) {
	node, err := fs.searchNodeFromDirectory(filepath.Dir(p))
	if err != nil {
		return nil, err
	}
	// filepath.Base returns "." for the root path; in that case the
	// directory node itself is the result
	name := filepath.Base(p)
	if name != "." {
		return node.Files[name], nil
	}
	return node, nil
}

func (fs *Filesystem) GetFile(p string, followLinks bool) (FilesystemEntry, error) {
	info, err := fs.GetNode(p)
	if err != nil {
		return nil, err
	}
	if linkEntry, ok := info.(*FilesystemLinkEntry); ok && followLinks {
		return fs.GetFile(*linkEntry.Link, followLinks)
	}
	if _, ok := info.(*FilesystemDirectoryEntry); ok {
		return nil, fmt.Errorf("not a file: %s", p)
	}
	return info, nil
}

func (fs *Filesystem) Save(writer io.Writer) error {
	headerJson, err := json.Marshal(fs.header)
	if err != nil {
		return err
	}

	headerPickle := pickle.CreateEmpty()
	headerPickle.WriteString(string(headerJson))
	headerBytes := headerPickle.ToBuffer()

	sizePickle := pickle.CreateEmpty()
	sizePickle.WriteUInt32(uint32(len(headerBytes)))
	sizeBytes := sizePickle.ToBuffer()

	if _, err := writer.Write(sizeBytes); err != nil {
		return err
	}

	if _, err := writer.Write(headerBytes); err != nil {
		return err
	}

	if _, err := fs.storage.WriteTo(writer); err != nil {
		return err
	}

	return nil
}
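
For orientation (not part of the diff): the typical read path through this API is to stat the archive for its size, open the filesystem, then list and read entries. A minimal sketch; the archive path and entry name are placeholders, and error handling is reduced to panics:

package main

import (
	"fmt"
	"os"

	"vaclive.party/software/lychee-slicer/asar"
)

func main() {
	f, err := os.Open("app.asar") // placeholder archive path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		panic(err)
	}

	fs, err := asar.OpenFilesystem(f, info.Size())
	if err != nil {
		panic(err)
	}

	for _, name := range fs.ListFiles() {
		fmt.Println(name)
	}

	// placeholder entry name; followLinks resolves symlink entries
	data, err := fs.ReadFile("package.json", true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}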

asar/integrity.go (new file, 42 additions)
@@ -0,0 +1,42 @@
package asar

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New can find it
	"fmt"
)

const (
	ALGORITHM  = "SHA256"
	BLOCK_SIZE = 4 * 1024 * 1024 // hash in 4 MiB blocks
)

type FileIntegrity struct {
	Algorithm string   `json:"algorithm"`
	Hash      string   `json:"hash"`
	BlockSize int      `json:"blockSize"`
	Blocks    []string `json:"blocks"`
}

func getFileIntegrity(data []byte) *FileIntegrity {
	blockHashes := []string{}
	for i := 0; i < len(data); i += BLOCK_SIZE {
		end := i + BLOCK_SIZE
		if end > len(data) {
			end = len(data)
		}
		blockHashes = append(blockHashes, hashBlock(data[i:end]))
	}

	return &FileIntegrity{
		Algorithm: ALGORITHM,
		Hash:      hashBlock(data),
		BlockSize: BLOCK_SIZE,
		Blocks:    blockHashes,
	}
}

func hashBlock(block []byte) string {
	hash := crypto.SHA256.New()
	hash.Write(block)
	return fmt.Sprintf("%x", hash.Sum(nil))
}
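
A reviewer note: ReadFile in asar/asar.go leaves verification as a FIXME. A hypothetical in-package helper (the name verifyIntegrity and its shape are illustrative, not part of this commit) could recompute and compare the recorded hashes:

// verifyIntegrity is a hypothetical helper (not in this commit) showing how
// ReadFile's "FIXME: integrity check" could be filled in: recompute the
// whole-file hash and each block hash, then compare against the record.
func verifyIntegrity(data []byte, integrity *FileIntegrity) error {
	if integrity == nil {
		return nil // nothing recorded, nothing to verify
	}
	if integrity.Algorithm != ALGORITHM {
		return fmt.Errorf("unsupported algorithm: %s", integrity.Algorithm)
	}
	if integrity.BlockSize <= 0 {
		return fmt.Errorf("invalid block size: %d", integrity.BlockSize)
	}
	if hashBlock(data) != integrity.Hash {
		return fmt.Errorf("file hash mismatch")
	}
	for i := 0; i < len(data); i += integrity.BlockSize {
		end := min(i+integrity.BlockSize, len(data))
		idx := i / integrity.BlockSize
		if idx >= len(integrity.Blocks) || hashBlock(data[i:end]) != integrity.Blocks[idx] {
			return fmt.Errorf("block %d hash mismatch", idx)
		}
	}
	return nil
}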

asar/pickle/pickle.go (new file, 225 additions)
@@ -0,0 +1,225 @@
package pickle

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

const (
	SIZE_UINT32  = 4
	PAYLOAD_UNIT = 64
	// effectively unlimited; marks a pickle as read-only
	CAPACITY_READ_ONLY = 1 << 53
)

// alignInt rounds i up to the next multiple of alignment.
func alignInt(i, alignment int) int {
	return i + ((alignment - (i % alignment)) % alignment)
}

type PickleIterator struct {
	payload       []byte
	payloadOffset int
	readIndex     int
	endIndex      int
}

func NewPickleIterator(pickle *Pickle) *PickleIterator {
	return &PickleIterator{
		payload:       pickle.header,
		payloadOffset: pickle.headerSize,
		readIndex:     0,
		endIndex:      pickle.getPayloadSize(),
	}
}

func (pi *PickleIterator) ReadBool() bool {
	return pi.ReadInt() != 0
}

func (pi *PickleIterator) ReadInt() int32 {
	buf := pi.readBytes(binary.Size(int32(0)))
	return int32(binary.LittleEndian.Uint32(buf))
}

func (pi *PickleIterator) ReadUInt32() uint32 {
	buf := pi.readBytes(binary.Size(uint32(0)))
	return binary.LittleEndian.Uint32(buf)
}

func (pi *PickleIterator) ReadInt64() int64 {
	buf := pi.readBytes(binary.Size(int64(0)))
	return int64(binary.LittleEndian.Uint64(buf))
}

func (pi *PickleIterator) ReadUInt64() uint64 {
	buf := pi.readBytes(binary.Size(uint64(0)))
	return binary.LittleEndian.Uint64(buf)
}

func (pi *PickleIterator) ReadFloat() float32 {
	return math.Float32frombits(pi.ReadUInt32())
}

func (pi *PickleIterator) ReadDouble() float64 {
	return math.Float64frombits(pi.ReadUInt64())
}

func (pi *PickleIterator) ReadString() string {
	length := pi.ReadInt()
	return string(pi.readBytes(int(length)))
}

func (pi *PickleIterator) readBytes(length int) []byte {
	readPayloadOffset := pi.getReadPayloadOffsetAndAdvance(length)
	return pi.payload[readPayloadOffset : readPayloadOffset+length]
}

func (pi *PickleIterator) getReadPayloadOffsetAndAdvance(length int) int {
	if length > pi.endIndex-pi.readIndex {
		pi.readIndex = pi.endIndex
		panic(fmt.Errorf("failed to read data with length of %d", length))
	}
	readPayloadOffset := pi.payloadOffset + pi.readIndex
	pi.advance(length)
	return readPayloadOffset
}

func (pi *PickleIterator) advance(size int) {
	alignedSize := alignInt(size, SIZE_UINT32)
	if pi.endIndex-pi.readIndex < alignedSize {
		pi.readIndex = pi.endIndex
	} else {
		pi.readIndex += alignedSize
	}
}

type Pickle struct {
	header              []byte
	headerSize          int
	capacityAfterHeader int
	writeOffset         int
}

func newPickle(buffer []byte) *Pickle {
	p := &Pickle{}
	if buffer != nil {
		// wrap an existing buffer read-only
		p.header = buffer
		p.headerSize = len(buffer) - p.getPayloadSize()
		p.capacityAfterHeader = CAPACITY_READ_ONLY
		p.writeOffset = 0
		if p.headerSize > len(buffer) {
			p.headerSize = 0
		}
		if p.headerSize != alignInt(p.headerSize, SIZE_UINT32) {
			p.headerSize = 0
		}
		if p.headerSize == 0 {
			p.header = make([]byte, 0)
		}
	} else {
		// start a fresh writable pickle with an empty payload
		p.header = make([]byte, 0)
		p.headerSize = SIZE_UINT32
		p.capacityAfterHeader = 0
		p.writeOffset = 0
		p.resize(PAYLOAD_UNIT)
		p.setPayloadSize(0)
	}
	return p
}

func CreateEmpty() *Pickle {
	return newPickle(nil)
}

func CreateFromBuffer(buffer []byte) *Pickle {
	return newPickle(buffer)
}

func (p *Pickle) GetHeader() []byte {
	return p.header
}

func (p *Pickle) GetHeaderSize() int {
	return p.headerSize
}

func (p *Pickle) CreateIterator() *PickleIterator {
	return NewPickleIterator(p)
}

func (p *Pickle) ToBuffer() []byte {
	return p.header[:p.headerSize+p.getPayloadSize()]
}

func (p *Pickle) WriteBool(value bool) {
	if value {
		p.WriteInt(1)
	} else {
		p.WriteInt(0)
	}
}

func (p *Pickle) WriteInt(value int32) {
	p.writeBytes(value, binary.LittleEndian)
}

func (p *Pickle) WriteUInt32(value uint32) {
	p.writeBytes(value, binary.LittleEndian)
}

func (p *Pickle) WriteInt64(value int64) {
	p.writeBytes(value, binary.LittleEndian)
}

func (p *Pickle) WriteUInt64(value uint64) {
	p.writeBytes(value, binary.LittleEndian)
}

func (p *Pickle) WriteFloat(value float32) {
	p.writeBytes(math.Float32bits(value), binary.LittleEndian)
}

func (p *Pickle) WriteDouble(value float64) {
	p.writeBytes(math.Float64bits(value), binary.LittleEndian)
}

func (p *Pickle) WriteString(value string) {
	length := len(value)
	p.WriteInt(int32(length))
	p.writeBytes([]byte(value), binary.LittleEndian)
}

func (p *Pickle) setPayloadSize(payloadSize int) {
	binary.LittleEndian.PutUint32(p.header[:4], uint32(payloadSize))
}

func (p *Pickle) getPayloadSize() int {
	return int(binary.LittleEndian.Uint32(p.header[:4]))
}

func (p *Pickle) writeBytes(data any, byteOrder binary.ByteOrder) {
	length := binary.Size(data)
	if length == -1 {
		panic(errors.New("unsupported data type"))
	}
	dataLength := alignInt(length, SIZE_UINT32)
	newSize := p.writeOffset + dataLength
	if newSize > p.capacityAfterHeader {
		p.resize(max(p.capacityAfterHeader*2, newSize))
	}
	// binary.Encode needs Go 1.23+; the buffer was grown above, so encoding
	// a type that passed the binary.Size check cannot fail here
	binary.Encode(p.header[p.headerSize+p.writeOffset:], byteOrder, data)

	// zero the alignment padding after the encoded value
	endOffset := p.headerSize + p.writeOffset + length
	for i := endOffset; i < endOffset+dataLength-length; i++ {
		p.header[i] = 0
	}

	p.setPayloadSize(newSize)
	p.writeOffset = newSize
}

func (p *Pickle) resize(newCapacity int) {
	newCapacity = alignInt(newCapacity, PAYLOAD_UNIT)
	p.header = append(p.header, make([]byte, newCapacity)...)
	p.capacityAfterHeader = newCapacity
}
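
For readers unfamiliar with the Chromium-style pickle layout this package mirrors: a buffer starts with a 4-byte little-endian payload size, followed by the 4-byte-aligned payload. A small round-trip sketch (not part of the diff) showing how asar.go serializes and re-reads the header JSON:

package main

import (
	"fmt"

	"vaclive.party/software/lychee-slicer/asar/pickle"
)

func main() {
	p := pickle.CreateEmpty()
	p.WriteString(`{"files":{}}`)
	buf := p.ToBuffer() // 4-byte payload size, then the aligned payload

	q := pickle.CreateFromBuffer(buf)
	fmt.Println(q.CreateIterator().ReadString()) // prints {"files":{}}
}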

asar/storage.go (new file, 99 additions)
@@ -0,0 +1,99 @@
package asar

import (
	"bytes"
	"errors"
	"io"
)

// SizedReaderAt is an io.ReaderAt with a known total size.
type SizedReaderAt interface {
	io.ReaderAt
	Size() int64
}

// WriterReaderAtWriterTo combines appending, random-access reads, and
// replaying into an io.Writer.
type WriterReaderAtWriterTo interface {
	io.Writer
	io.ReaderAt
	io.WriterTo
}

// BytesReadAtWriter is an in-memory WriterReaderAtWriterTo backed by a
// byte slice.
type BytesReadAtWriter struct {
	s []byte
}

func NewBytesReadAtWriter(s []byte) *BytesReadAtWriter {
	return &BytesReadAtWriter{s: s}
}

func (b *BytesReadAtWriter) Write(p []byte) (n int, err error) {
	b.s = append(b.s, p...)
	return len(p), nil
}

func (b *BytesReadAtWriter) ReadAt(p []byte, off int64) (n int, err error) {
	// cannot modify state - see io.ReaderAt
	if off < 0 {
		return 0, errors.New("negative offset")
	}
	if off >= int64(len(b.s)) {
		return 0, io.EOF
	}
	n = copy(p, b.s[off:])
	if n < len(p) {
		err = io.EOF
	}
	return
}

func (b *BytesReadAtWriter) WriteTo(w io.Writer) (n int64, err error) {
	return io.Copy(w, bytes.NewReader(b.s))
}

// FilesystemStorage layers newly written data on top of an existing
// archive: offsets below existing.Size() are served from the original
// data, everything past that from the new region.
type FilesystemStorage struct {
	existing  SizedReaderAt
	new       WriterReaderAtWriterTo
	newOffset int64
}

func NewFilesystemStorage(existing SizedReaderAt, new WriterReaderAtWriterTo) *FilesystemStorage {
	return &FilesystemStorage{
		existing:  existing,
		new:       new,
		newOffset: 0,
	}
}

func (fs *FilesystemStorage) Size() int64 {
	return fs.existing.Size() + fs.newOffset
}

func (fs *FilesystemStorage) ReadAt(p []byte, off int64) (n int, err error) {
	existingSize := fs.existing.Size()
	if off < existingSize {
		// reads must not cross the boundary between the two regions
		if off+int64(len(p)) > existingSize {
			return 0, io.ErrUnexpectedEOF
		}
		return fs.existing.ReadAt(p, off)
	}

	return fs.new.ReadAt(p, off-existingSize)
}

func (fs *FilesystemStorage) Write(p []byte) (n int, err error) {
	n, err = fs.new.Write(p)
	fs.newOffset += int64(n)
	return
}

func (fs *FilesystemStorage) WriteTo(w io.Writer) (n int64, err error) {
	// write the existing region first, then replay the new region
	n, err = io.Copy(w, io.NewSectionReader(fs.existing, 0, fs.existing.Size()))
	if err != nil {
		return n, err
	}

	n2, err := fs.new.WriteTo(w)
	return n + n2, err
}
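
To make the two-region design concrete, a short sketch (not part of the diff; errors ignored for brevity): reads at offsets below the existing region are served from the original archive, writes append to the new region, and WriteTo replays both in order.

package main

import (
	"bytes"
	"fmt"
	"os"

	"vaclive.party/software/lychee-slicer/asar"
)

func main() {
	existing := bytes.NewReader([]byte("old-data"))
	st := asar.NewFilesystemStorage(existing, asar.NewBytesReadAtWriter(nil))

	st.Write([]byte("new-data")) // lands after the existing region

	buf := make([]byte, 8)
	st.ReadAt(buf, existing.Size()) // offset 8: start of the new region
	fmt.Printf("%s\n", buf)         // prints "new-data"

	st.WriteTo(os.Stdout) // replays "old-data" then "new-data"
}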
(7 more changed files not shown)