ssz, tests: restore non-monolith API, add README sections #24

Merged · 1 commit · Sep 18, 2024
210 changes: 209 additions & 1 deletion README.md

Large diffs are not rendered by default.
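Since the README diff is not rendered, here is a rough orientation sketch of the dual API it documents (hypothetical helper, not the README's own text; assumes the github.com/karalabe/ssz import path, the "bytes" package, and some ssz.Object implementation such as the Withdrawal type from the examples below):

// encodeBothWays is a hypothetical helper contrasting the restored fork-free
// API with its fork-aware ...OnFork counterpart.
func encodeBothWays(obj ssz.Object) ([]byte, error) {
	// Non-monolithic types (no fork-specific rules) use the plain helpers:
	var buf bytes.Buffer
	if err := ssz.EncodeToStream(&buf, obj); err != nil {
		return nil, err
	}
	// Monolithic types pin an explicit fork via the ...OnFork variants; for a
	// fork-independent type this call is equivalent, since the plain helper
	// just passes ssz.ForkUnknown through:
	var monolith bytes.Buffer
	if err := ssz.EncodeToStreamOnFork(&monolith, obj, ssz.ForkUnknown); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}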

6 changes: 3 additions & 3 deletions example_asymmetric_test.go
@@ -41,11 +41,11 @@ func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) {
 }
 
 func ExampleEncodeAsymmetricObject() {
-	blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil), ssz.ForkUnknown))
-	if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym), ssz.ForkUnknown); err != nil {
+	blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil)))
+	if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym)); err != nil {
 		panic(err)
 	}
-	hash := ssz.HashSequential(new(WithdrawalAsym), ssz.ForkUnknown)
+	hash := ssz.HashSequential(new(WithdrawalAsym))
 
 	fmt.Printf("ssz: %#x\nhash: %#x\n", blob, hash)
 	// Output:
2 changes: 1 addition & 1 deletion example_checked_test.go
@@ -30,7 +30,7 @@ func ExampleDecodeCheckedObject() {
 	blob := make([]byte, 44)
 
 	obj := new(WithdrawalChecked)
-	if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil {
+	if err := ssz.DecodeFromBytes(blob, obj); err != nil {
 		panic(err)
 	}
 	fmt.Printf("obj: %#x\n", obj)
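For completeness, a counterpart sketch (not part of this diff): the same checked decode driven from a reader via the restored fork-free DecodeFromStream, whose size argument mirrors the 44-byte blob above; imports of "io" and the ssz package are assumed.

// decodeCheckedFromStream is a hypothetical helper mirroring the example
// above, but reading from an io.Reader instead of a byte slice.
func decodeCheckedFromStream(r io.Reader) (*WithdrawalChecked, error) {
	obj := new(WithdrawalChecked)
	// 44 is the static size used by the checked example above.
	if err := ssz.DecodeFromStream(r, obj, 44); err != nil {
		return nil, err
	}
	return obj, nil
}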
4 changes: 2 additions & 2 deletions example_dynamic_test.go
@@ -72,8 +72,8 @@ func (e *ExecutionPayload) DefineSSZ(codec *ssz.Codec) {
 func ExampleEncodeDynamicObject() {
 	obj := new(ExecutionPayload)
 
-	blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown))
-	if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil {
+	blob := make([]byte, ssz.Size(obj))
+	if err := ssz.EncodeToBytes(blob, obj); err != nil {
 		panic(err)
 	}
 	fmt.Printf("ssz: %#x\n", blob)
4 changes: 2 additions & 2 deletions example_static_test.go
@@ -31,10 +31,10 @@ func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) {
 
 func ExampleEncodeStaticObject() {
 	out := new(bytes.Buffer)
-	if err := ssz.EncodeToStream(out, new(Withdrawal), ssz.ForkUnknown); err != nil {
+	if err := ssz.EncodeToStream(out, new(Withdrawal)); err != nil {
 		panic(err)
 	}
-	hash := ssz.HashSequential(new(Withdrawal), ssz.ForkUnknown)
+	hash := ssz.HashSequential(new(Withdrawal))
 
 	fmt.Printf("ssz: %#x\nhash: %#x\n", out, hash)
 	// Output:
2 changes: 1 addition & 1 deletion hasher.go
@@ -572,7 +572,7 @@ func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u
 	defer h.ascendMixinLayer(uint64(len(objects)), maxItems)
 
 	// If threading is disabled, or hashing nothing, do it sequentially
-	if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.codec.fork)) < concurrencyThreshold {
+	if !h.threads || len(objects) == 0 || len(objects)*int(SizeOnFork(objects[0], h.codec.fork)) < concurrencyThreshold {
 		for _, obj := range objects {
 			h.descendLayer()
 			obj.DefineSSZ(h.codec)
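The hunk above swaps Size for SizeOnFork because, with Size reduced to a fork-free wrapper (see ssz.go below), the hasher must call SizeOnFork explicitly to respect the fork its codec is pinned to. The gate itself is unchanged; restated as a standalone sketch (concurrencyThreshold's real value lives in hasher.go, the one below is only a stand-in):

const concurrencyThreshold = 1 << 16 // illustrative stand-in, not the library's constant

// useSequentialHashing restates the condition above: hash on one thread when
// threading is off, there is nothing to hash, or the total payload is too
// small to amortize the cost of spinning up concurrent workers.
func useSequentialHashing(threads bool, count int, itemSize uint32) bool {
	return !threads || count == 0 || count*int(itemSize) < concurrencyThreshold
}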
139 changes: 108 additions & 31 deletions ssz.go
@@ -87,10 +87,21 @@ var sizerPool = sync.Pool{
 	},
 }
 
-// EncodeToStream serializes the object into a data stream. Do not use this
-// method with a bytes.Buffer to write into a []byte slice, as that will do
-// double the byte copying. For that use case, use EncodeToBytes instead.
-func EncodeToStream(w io.Writer, obj Object, fork Fork) error {
+// EncodeToStream serializes a non-monolithic object into a data stream. If the
+// type contains fork-specific rules, use EncodeToStreamOnFork.
+//
+// Do not use this method with a bytes.Buffer to write into a []byte slice, as
+// that will do double the byte copying. For that use case, use EncodeToBytes.
+func EncodeToStream(w io.Writer, obj Object) error {
+	return EncodeToStreamOnFork(w, obj, ForkUnknown)
+}
+
+// EncodeToStreamOnFork serializes a monolithic object into a data stream. If the
+// type does not contain fork-specific rules, you can also use EncodeToStream.
+//
+// Do not use this method with a bytes.Buffer to write into a []byte slice, as that
+// will do double the byte copying. For that use case, use EncodeToBytesOnFork.
+func EncodeToStreamOnFork(w io.Writer, obj Object, fork Fork) error {
 	codec := encoderPool.Get().(*Codec)
 	defer encoderPool.Put(codec)
 
@@ -113,13 +124,25 @@ func EncodeToStream(w io.Writer, obj Object, fork Fork) error {
 	return err
 }
 
-// EncodeToBytes serializes the object into a byte buffer. Don't use this method
-// if you want to then write the buffer into a stream via some writer, as that
-// would double the memory use for the temporary buffer. For that use case, use
-// EncodeToStream instead.
-func EncodeToBytes(buf []byte, obj Object, fork Fork) error {
+// EncodeToBytes serializes a non-monolithic object into a byte buffer. If the
+// type contains fork-specific rules, use EncodeToBytesOnFork.
+//
+// Don't use this method if you want to then write the buffer into a stream via
+// some writer, as that would double the memory use for the temporary buffer.
+// For that use case, use EncodeToStream.
+func EncodeToBytes(buf []byte, obj Object) error {
+	return EncodeToBytesOnFork(buf, obj, ForkUnknown)
+}
+
+// EncodeToBytesOnFork serializes a monolithic object into a byte buffer. If the
+// type does not contain fork-specific rules, you can also use EncodeToBytes.
+//
+// Don't use this method if you want to then write the buffer into a stream via
+// some writer, as that would double the memory use for the temporary buffer.
+// For that use case, use EncodeToStreamOnFork.
+func EncodeToBytesOnFork(buf []byte, obj Object, fork Fork) error {
 	// Sanity check that we have enough space to serialize into
-	if size := Size(obj, fork); int(size) > len(buf) {
+	if size := SizeOnFork(obj, fork); int(size) > len(buf) {
 		return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size)
 	}
 	codec := encoderPool.Get().(*Codec)
@@ -144,10 +167,22 @@ func EncodeToBytes(buf []byte, obj Object, fork Fork) error {
 	return err
 }
 
-// DecodeFromStream parses an object with the given size out of a stream. Do not
-// use this method with a bytes.Buffer to read from a []byte slice, as that will
-// double the byte copying. For that use case, use DecodeFromBytes instead.
-func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error {
+// DecodeFromStream parses a non-monolithic object with the given size out of a
+// stream. If the type contains fork-specific rules, use DecodeFromStreamOnFork.
+//
+// Do not use this method with a bytes.Buffer to read from a []byte slice, as that
+// will double the byte copying. For that use case, use DecodeFromBytes.
+func DecodeFromStream(r io.Reader, obj Object, size uint32) error {
+	return DecodeFromStreamOnFork(r, obj, size, ForkUnknown)
+}
+
+// DecodeFromStreamOnFork parses a monolithic object with the given size out of
+// a stream. If the type does not contain fork-specific rules, you can also use
+// DecodeFromStream.
+//
+// Do not use this method with a bytes.Buffer to read from a []byte slice, as that
+// will double the byte copying. For that use case, use DecodeFromBytesOnFork.
+func DecodeFromStreamOnFork(r io.Reader, obj Object, size uint32, fork Fork) error {
 	// Retrieve a new decoder codec and set its data source
 	codec := decoderPool.Get().(*Codec)
 	defer decoderPool.Put(codec)
@@ -178,11 +213,23 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error {
 	return err
 }
 
-// DecodeFromBytes parses an object from a byte buffer. Do not use this method
-// if you want to first read the buffer from a stream via some reader, as that
-// would double the memory use for the temporary buffer. For that use case, use
-// DecodeFromStream instead.
-func DecodeFromBytes(blob []byte, obj Object, fork Fork) error {
+// DecodeFromBytes parses a non-monolithic object from a byte buffer. If the type
+// contains fork-specific rules, use DecodeFromBytesOnFork.
+//
+// Do not use this method if you want to first read the buffer from a stream via
+// some reader, as that would double the memory use for the temporary buffer. For
+// that use case, use DecodeFromStream instead.
+func DecodeFromBytes(blob []byte, obj Object) error {
+	return DecodeFromBytesOnFork(blob, obj, ForkUnknown)
+}
+
+// DecodeFromBytesOnFork parses a monolithic object from a byte buffer. If the
+// type does not contain fork-specific rules, you can also use DecodeFromBytes.
+//
+// Do not use this method if you want to first read the buffer from a stream via
+// some reader, as that would double the memory use for the temporary buffer. For
+// that use case, use DecodeFromStreamOnFork instead.
+func DecodeFromBytesOnFork(blob []byte, obj Object, fork Fork) error {
 	// Reject decoding from an empty slice
 	if len(blob) == 0 {
 		return io.ErrUnexpectedEOF
@@ -220,10 +267,21 @@ func DecodeFromBytes(blob []byte, obj Object, fork Fork) error {
 	return err
 }
 
-// HashSequential computes the ssz merkle root of the object on a single thread.
-// This is useful for processing small objects with stable runtime and O(1) GC
-// guarantees.
-func HashSequential(obj Object, fork Fork) [32]byte {
+// HashSequential computes the merkle root of a non-monolithic object on a single
+// thread. This is useful for processing small objects with stable runtime and O(1)
+// GC guarantees.
+//
+// If the type contains fork-specific rules, use HashSequentialOnFork.
+func HashSequential(obj Object) [32]byte {
+	return HashSequentialOnFork(obj, ForkUnknown)
+}
+
+// HashSequentialOnFork computes the merkle root of a monolithic object on a single
+// thread. This is useful for processing small objects with stable runtime and O(1)
+// GC guarantees.
+//
+// If the type does not contain fork-specific rules, you can also use HashSequential.
+func HashSequentialOnFork(obj Object, fork Fork) [32]byte {
 	codec := hasherPool.Get().(*Codec)
 	defer hasherPool.Put(codec)
 	defer codec.has.Reset()
@@ -240,11 +298,23 @@ func HashSequential(obj Object, fork Fork) [32]byte {
 	return codec.has.chunks[0]
 }
 
-// HashConcurrent computes the ssz merkle root of the object on potentially multiple
-// concurrent threads (iff some data segments are large enough to be worth it). This
-// is useful for processing large objects, but will place a bigger load on your CPU
-// and GC; and might be more variable timing wise depending on other load.
-func HashConcurrent(obj Object, fork Fork) [32]byte {
+// HashConcurrent computes the merkle root of a non-monolithic object on potentially
+// multiple concurrent threads (iff some data segments are large enough to be worth
+// it). This is useful for processing large objects, but will place a bigger load on
+// your CPU and GC; and might be more variable timing wise depending on other load.
+//
+// If the type contains fork-specific rules, use HashConcurrentOnFork.
+func HashConcurrent(obj Object) [32]byte {
+	return HashConcurrentOnFork(obj, ForkUnknown)
+}
+
+// HashConcurrentOnFork computes the merkle root of a monolithic object on potentially
+// multiple concurrent threads (iff some data segments are large enough to be worth
+// it). This is useful for processing large objects, but will place a bigger load on
+// your CPU and GC; and might be more variable timing wise depending on other load.
+//
+// If the type does not contain fork-specific rules, you can also use HashConcurrent.
+func HashConcurrentOnFork(obj Object, fork Fork) [32]byte {
 	codec := hasherPool.Get().(*Codec)
 	defer hasherPool.Put(codec)
 	defer codec.has.Reset()
@@ -263,9 +333,16 @@ func HashConcurrent(obj Object, fork Fork) [32]byte {
 	return codec.has.chunks[0]
 }
 
-// Size retrieves the size of a ssz object, independent if it's a static or a
-// dynamic one.
-func Size(obj Object, fork Fork) uint32 {
+// Size retrieves the size of a non-monolithic object, independent if it is static
+// or dynamic. If the type contains fork-specific rules, use SizeOnFork.
+func Size(obj Object) uint32 {
+	return SizeOnFork(obj, ForkUnknown)
+}
+
+// SizeOnFork retrieves the size of a monolithic object, independent if it is
+// static or dynamic. If the type does not contain fork-specific rules, you can
+// also use Size.
+func SizeOnFork(obj Object, fork Fork) uint32 {
 	sizer := sizerPool.Get().(*Sizer)
 	defer sizerPool.Put(sizer)
 
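Taken together, the restored surface composes as below: a minimal roundtrip sketch (hypothetical helper, not part of this diff), assuming a fork-independent ssz.Object such as the Withdrawal type from the examples. Fork-aware types would call the ...OnFork variants with an explicit fork instead.

// roundtripRoot encodes src, decodes the blob back into dst, and returns the
// merkle root of the result, exercising the fork-free helpers end to end.
func roundtripRoot(src, dst ssz.Object) ([32]byte, error) {
	blob := make([]byte, ssz.Size(src))
	if err := ssz.EncodeToBytes(blob, src); err != nil {
		return [32]byte{}, err
	}
	if err := ssz.DecodeFromBytes(blob, dst); err != nil {
		return [32]byte{}, err
	}
	// HashConcurrent is the drop-in alternative for large objects.
	return ssz.HashSequential(dst), nil
}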