Add Browser Support #295
@@ -1,18 +1,211 @@
//go:build !windows && !js && !wasm

Review comment: Why is Windows excluded?
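For context: the constraint above keeps this flat-file store out of Windows and browser builds, so those targets need a counterpart file carrying the complementary constraint. A minimal sketch of how such a build-tag pair selects an implementation; the file and function names are hypothetical and not taken from this PR:

// headerfile_default.go (hypothetical)
//go:build !windows && !js && !wasm

package headerfs

// headerBackingName reports which backing this build uses.
func headerBackingName() string { return "flat file on disk" }

// headerfile_fallback.go (hypothetical)
//go:build windows || js || wasm

package headerfs

// headerBackingName would instead point at a backing that does not rely on
// the append semantics of *os.File used by the flat-file store.
func headerBackingName() string { return "alternative backing" }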
package headerfs

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcwallet/walletdb"
)

// ErrHeaderNotFound is returned when a target header on disk (flat file) can't
// be found.
type ErrHeaderNotFound struct {
	error
}

Review comment: I think this would be easier to review if one commit moved the contents into this new file, then subsequent commits start to add the build tag and additional refactoring.

// headerBufPool is a pool of bytes.Buffer that will be re-used by the various
// headerStore implementations to batch their header writes to disk. By
// utilizing this variable we can minimize the total number of allocations when
// writing headers to disk.
var headerBufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}
// headerStore combines an on-disk set of headers within a flat file in
// addition to a database which indexes that flat file. Together, these two
// abstractions can be used in order to build an indexed header store for any
// type of "header" as it deals only with raw bytes, and leaves it to a higher
// layer to interpret those raw bytes accordingly.
//
// TODO(roasbeef): quickcheck coverage.
type headerStore struct {
	mtx sync.RWMutex // nolint:structcheck // false positive because used as embedded struct only

	fileName string

	file *os.File

	hType HeaderType

	*headerIndex
}

// newHeaderStore creates a new headerStore given an already open database, a
// target file path for the flat-file and a particular header type. The target
// file will be created as necessary.
func newHeaderStore(db walletdb.DB, filePath string,
	hType HeaderType) (*headerStore, error) {

	var flatFileName string
	switch hType {
	case Block:
		flatFileName = "block_headers.bin"
	case RegularFilter:
		flatFileName = "reg_filter_headers.bin"
	default:
		return nil, fmt.Errorf("unrecognized filter type: %v", hType)
	}

	flatFileName = filepath.Join(filePath, flatFileName)

	// We'll open the file, creating it if necessary and ensuring that all
	// writes are actually appends to the end of the file.
	fileFlags := os.O_RDWR | os.O_APPEND | os.O_CREATE
	headerFile, err := os.OpenFile(flatFileName, fileFlags, 0644)
	if err != nil {
		return nil, err
	}

	// With the file open, we'll then create the header index so we can
	// have random access into the flat files.
	index, err := newHeaderIndex(db, hType)
	if err != nil {
		return nil, err
	}

	return &headerStore{
		fileName:    flatFileName,
		file:        headerFile,
		hType:       hType,
		headerIndex: index,
	}, nil
}
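The WriteHeaders methods below call appendRaw, addHeaders and truncateIndex directly on their receivers, which suggests the concrete stores embed headerStore. A rough sketch of how a concrete constructor would wire that up; the real blockHeaderStore fields are not shown in this hunk, so treat the shape as an assumption:

// blockHeaderStoreSketch is a stand-in for the real blockHeaderStore, which
// is assumed to embed *headerStore (matching the receivers in this file).
type blockHeaderStoreSketch struct {
	*headerStore
}

// newBlockHeaderStoreSketch opens the block-header flat file plus its index
// and wraps them in the concrete store type.
func newBlockHeaderStoreSketch(db walletdb.DB, dir string) (*blockHeaderStoreSketch, error) {
	hStore, err := newHeaderStore(db, dir, Block)
	if err != nil {
		return nil, err
	}

	return &blockHeaderStoreSketch{headerStore: hStore}, nil
}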
// WriteHeaders writes a set of headers to disk and updates the index in a
// single atomic transaction.
//
// NOTE: Part of the BlockHeaderStore interface.
func (h *blockHeaderStore) WriteHeaders(hdrs ...BlockHeader) error {
	// Lock store for write.
	h.mtx.Lock()
	defer h.mtx.Unlock()

	// First, we'll grab a buffer from the write buffer pool so we can
	// reduce our total number of allocations, and also write the headers
	// in a single swoop.
	headerBuf := headerBufPool.Get().(*bytes.Buffer)
	headerBuf.Reset()
	defer headerBufPool.Put(headerBuf)

	// Next, we'll write out all the passed headers in series into the
	// buffer we just extracted from the pool.
	for _, header := range hdrs {
		if err := header.Serialize(headerBuf); err != nil {
			return err
		}
	}

	// With all the headers written to the buffer, we'll now write out the
	// entire batch in a single write call.
	if err := h.appendRaw(headerBuf.Bytes()); err != nil {
		return err
	}

	// Once those are written, we'll then collate all the headers into
	// headerEntry instances so we can write them all into the index in a
	// single atomic batch.
	headerLocs := make([]headerEntry, len(hdrs))
	for i, header := range hdrs {
		headerLocs[i] = header.toIndexEntry()
	}

	return h.addHeaders(headerLocs)
}
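Since Serialize comes from the embedded wire header, each entry occupies 80 bytes in the flat file, which is what the size/80 arithmetic in height() below relies on. A hedged usage sketch, assuming BlockHeader wraps an embedded *wire.BlockHeader together with its Height:

// writeOneHeaderSketch appends a single header batch; the field names assume
// the BlockHeader wrapper is an embedded *wire.BlockHeader plus a Height.
func writeOneHeaderSketch(store *blockHeaderStore, wireHdr *wire.BlockHeader,
	height uint32) error {

	hdr := BlockHeader{
		BlockHeader: wireHdr,
		Height:      height,
	}

	// One call serializes the header into the pooled buffer, appends the
	// 80 bytes to the flat file and updates the index atomically.
	return store.WriteHeaders(hdr)
}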
// WriteHeaders writes a batch of filter headers to persistent storage. The
// headers themselves are appended to the flat file, and then the index is
// updated to reflect the new entries.
func (f *FilterHeaderStore) WriteHeaders(hdrs ...FilterHeader) error {
	// Lock store for write.
	f.mtx.Lock()
	defer f.mtx.Unlock()

	// If there are 0 headers to be written, return immediately. This
	// prevents the newTip assignment from panicking because of an index
	// of -1.
	if len(hdrs) == 0 {
		return nil
	}

	// First, we'll grab a buffer from the write buffer pool so we can
	// reduce our total number of allocations, and also write the headers
	// in a single swoop.
	headerBuf := headerBufPool.Get().(*bytes.Buffer)
	headerBuf.Reset()
	defer headerBufPool.Put(headerBuf)

	// Next, we'll write out all the passed headers in series into the
	// buffer we just extracted from the pool.
	for _, header := range hdrs {
		if _, err := headerBuf.Write(header.FilterHash[:]); err != nil {
			return err
		}
	}

	// With all the headers written to the buffer, we'll now write out the
	// entire batch in a single write call.
	if err := f.appendRaw(headerBuf.Bytes()); err != nil {
		return err
	}

	// As the block headers should already be written, we only need to
	// update the tip pointer for this particular header type.
	newTip := hdrs[len(hdrs)-1].toIndexEntry().hash
	return f.truncateIndex(&newTip, false)
}
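In the same hedged spirit, a sketch for filter headers, assuming FilterHeader carries the block's hash, its 32-byte filter hash (matching the size/32 arithmetic below) and a height:

// writeFilterHeaderSketch appends one filter header; the HeaderHash and
// Height field names are assumptions, only FilterHash appears in this hunk.
func writeFilterHeaderSketch(store *FilterHeaderStore, blockHash,
	filterHash chainhash.Hash, height uint32) error {

	hdr := FilterHeader{
		HeaderHash: blockHash,
		FilterHash: filterHash,
		Height:     height,
	}

	// Appends the 32-byte filter hash and advances the index tip.
	return store.WriteHeaders(hdr)
}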
// Remove removes the flat file backing this header store.
func (h *headerStore) Remove() error {
	// Close the file before removing it. This is required by some
	// operating systems (e.g., Windows).
	if err := h.file.Close(); err != nil {
		return err
	}
	if err := os.Remove(h.fileName); err != nil {
		return err
	}

	return nil
}

// height calculates the current height based on the flat file's size.
func (h *headerStore) height() (uint32, bool, error) {
	fileInfo, err := h.file.Stat()
	if err != nil {
		return 0, false, err
	}

	size := fileInfo.Size()

	// Check if the file is empty. Fall back to a height of zero.
	if size == 0 {
		return 0, true, nil
	}

	var fileHeight uint32

	// Compute the size of the current file so we can calculate the
	// latest header written to disk.
	switch h.hType {
	case Block:
		fileHeight = uint32(size/80) - 1

	case RegularFilter:
		fileHeight = uint32(size/32) - 1
	}

	return fileHeight, false, nil
}
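As a quick check of the arithmetic: a flat file holding three 80-byte block headers is 240 bytes, so fileHeight = 240/80 - 1 = 2, the subtraction accounting for the genesis header sitting at height 0; the 32-byte filter-header case works the same way.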
// appendRaw appends a new raw header to the end of the flat file.
Review comment: Perhaps we can instead modify the tests to use a build tag to specify tempdb vs bdb?

Review comment: We could have a NewTestDB func defined for the build w/ and w/o the build tag defined.
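A rough sketch of that suggestion, assuming a tempdb walletdb backend is registered for browser builds; the file names, Create arguments and the tempdb backend itself are assumptions, not taken from this PR:

// db_test_default.go (hypothetical)
//go:build !js && !wasm

package headerfs

import (
	"path/filepath"
	"testing"
	"time"

	"github.com/btcsuite/btcwallet/walletdb"
	_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)

// NewTestDB opens a bbolt-backed walletdb for tests on non-browser targets.
func NewTestDB(t *testing.T, dir string) walletdb.DB {
	db, err := walletdb.Create(
		"bdb", filepath.Join(dir, "test.db"), true, 10*time.Second,
	)
	if err != nil {
		t.Fatalf("unable to create test db: %v", err)
	}

	return db
}

// db_test_browser.go (hypothetical)
//go:build js || wasm

package headerfs

// NewTestDB here would call walletdb.Create("tempdb", ...) instead, so the
// same tests exercise the in-memory backend in browser builds.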