-
Notifications
You must be signed in to change notification settings - Fork 145
aggregator committee: extend value checks #2682
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: boole-fork
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -10,6 +10,7 @@ import ( | |
| ssz "github.com/ferranbt/fastssz" | ||
| "github.com/pkg/errors" | ||
| specqbft "github.com/ssvlabs/ssv-spec/qbft" | ||
| "go.uber.org/zap" | ||
|
|
||
| spectypes "github.com/ssvlabs/ssv-spec/types" | ||
|
|
||
|
|
@@ -18,6 +19,7 @@ import ( | |
|
|
||
| func (b *BaseRunner) ValidatePreConsensusMsg( | ||
| ctx context.Context, | ||
| logger *zap.Logger, | ||
| runner Runner, | ||
| psigMsgs *spectypes.PartialSignatureMessages, | ||
| ) error { | ||
|
|
@@ -28,27 +30,40 @@ func (b *BaseRunner) ValidatePreConsensusMsg( | |
| return spectypes.WrapError(spectypes.NoRunningDutyErrorCode, ErrRunningDutyFinished) | ||
| } | ||
|
|
||
| // Validate the pre-consensus message differently depending on a message type. | ||
| validateMsg := func() error { | ||
| if err := b.validatePartialSigMsg(psigMsgs, b.State.CurrentDuty.DutySlot()); err != nil { | ||
| return err | ||
| if err := b.validatePartialSigMsg(psigMsgs, b.State.CurrentDuty.DutySlot()); err != nil { | ||
| return err | ||
| } | ||
|
|
||
| if runner.GetRole() == spectypes.RoleAggregatorCommittee { | ||
| aggRunner, ok := runner.(*AggregatorCommitteeRunner) | ||
| if !ok { | ||
| return fmt.Errorf("unexpected runner type %T for aggregator committee role", runner) | ||
| } | ||
|
|
||
| roots, domain, err := runner.expectedPreConsensusRootsAndDomain() | ||
| aggregatorMap, contributionMap, err := aggRunner.expectedPreConsensusRoots(ctx, logger) | ||
| if err != nil { | ||
| return fmt.Errorf("compute pre-consensus roots and domain: %w", err) | ||
| return fmt.Errorf("compute pre-consensus roots: %w", err) | ||
| } | ||
|
|
||
| return b.verifyExpectedRoot(ctx, runner, psigMsgs, roots, domain) | ||
| expectedRoots := make(map[[32]byte]struct{}) | ||
| for _, root := range aggregatorMap { | ||
| expectedRoots[root] = struct{}{} | ||
| } | ||
| for _, indexMap := range contributionMap { | ||
| for _, root := range indexMap { | ||
| expectedRoots[root] = struct{}{} | ||
| } | ||
| } | ||
|
|
||
| return b.verifyExpectedSigningRoots(psigMsgs, expectedRoots) | ||
|
Comment on lines
+37
to
+58
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I understand that you are trying to fend off attacks where a single validator can complete quorum on a single root, thus preventing selection proofs on all other roots? We had a discussion on this with @liorrutenberg and it was decided that we allow this attack to allow operators to have divergent views of validators (in case of EL issues). I just discussed with @MatheusFranco99, and maybe the correct solution is to actually wait on the pre-consensus stage if no aggregator passes the selection proof.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, I'm adding a PR for it. |
||
| } | ||
|
|
||
| if runner.GetRole() == spectypes.RoleAggregatorCommittee { | ||
| validateMsg = func() error { | ||
| return b.validatePartialSigMsg(psigMsgs, b.State.CurrentDuty.DutySlot()) | ||
| } | ||
| roots, domain, err := runner.expectedPreConsensusRootsAndDomain() | ||
| if err != nil { | ||
| return fmt.Errorf("compute pre-consensus roots and domain: %w", err) | ||
| } | ||
|
|
||
| return validateMsg() | ||
| return b.verifyExpectedRoot(ctx, runner, psigMsgs, roots, domain) | ||
| } | ||
|
|
||
| // Verify each signature in container removing the invalid ones | ||
|
|
@@ -63,7 +78,12 @@ func (b *BaseRunner) FallBackAndVerifyEachSignature(container *ssv.PartialSigCon | |
| } | ||
| } | ||
|
|
||
| func (b *BaseRunner) ValidatePostConsensusMsg(ctx context.Context, runner Runner, psigMsgs *spectypes.PartialSignatureMessages) error { | ||
| func (b *BaseRunner) ValidatePostConsensusMsg( | ||
| ctx context.Context, | ||
| logger *zap.Logger, | ||
| runner Runner, | ||
| psigMsgs *spectypes.PartialSignatureMessages, | ||
| ) error { | ||
| if !b.hasDutyAssigned() { | ||
| return spectypes.WrapError(spectypes.NoRunningDutyErrorCode, ErrNoDutyAssigned) | ||
| } | ||
|
|
@@ -159,7 +179,31 @@ func (b *BaseRunner) ValidatePostConsensusMsg(ctx context.Context, runner Runner | |
| // Use b.State.CurrentDuty.DutySlot() since CurrentDuty never changes for AggregatorCommitteeRunner | ||
| // by design, hence there is no need to store slot number on decidedValue for AggregatorCommitteeRunner. | ||
| expectedSlot := b.State.CurrentDuty.DutySlot() | ||
| return b.validatePartialSigMsg(psigMsgs, expectedSlot) | ||
| if err := b.validatePartialSigMsg(psigMsgs, expectedSlot); err != nil { | ||
| return err | ||
| } | ||
|
|
||
| aggRunner, ok := runner.(*AggregatorCommitteeRunner) | ||
| if !ok { | ||
| return fmt.Errorf("unexpected runner type %T for aggregator committee role", runner) | ||
| } | ||
|
|
||
| aggregatorMap, contributionMap, _, err := aggRunner.expectedPostConsensusRootsAndBeaconObjects(ctx, logger) | ||
| if err != nil { | ||
| return fmt.Errorf("compute post-consensus roots: %w", err) | ||
| } | ||
|
|
||
| expectedRoots := make(map[[32]byte]struct{}) | ||
| for _, root := range aggregatorMap { | ||
| expectedRoots[root] = struct{}{} | ||
| } | ||
| for _, roots := range contributionMap { | ||
| for _, root := range roots { | ||
| expectedRoots[root] = struct{}{} | ||
| } | ||
| } | ||
|
|
||
| return b.verifyExpectedSigningRoots(psigMsgs, expectedRoots) | ||
|
Comment on lines
+182
to
+206
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think whatever conclusion we reach on pre-consensus validation will also be valid here |
||
| } | ||
| } | ||
|
|
||
|
|
@@ -235,3 +279,15 @@ func (b *BaseRunner) verifyExpectedRoot( | |
| } | ||
| return nil | ||
| } | ||
|
|
||
| func (b *BaseRunner) verifyExpectedSigningRoots( | ||
| psigMsgs *spectypes.PartialSignatureMessages, | ||
| expectedRoots map[[32]byte]struct{}, | ||
| ) error { | ||
| for _, msg := range psigMsgs.Messages { | ||
| if _, ok := expectedRoots[msg.SigningRoot]; !ok { | ||
| return spectypes.NewError(spectypes.RootHashInvalidErrorCode, "unexpected signing root") | ||
| } | ||
| } | ||
| return nil | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -78,10 +78,47 @@ func (v *voteChecker) CheckValue(value []byte) error { | |
| return nil | ||
| } | ||
|
|
||
| type aggregatorCommitteeChecker struct{} | ||
| type aggregatorCommitteeChecker struct { | ||
| allowedAggregators map[phase0.ValidatorIndex]map[phase0.CommitteeIndex]struct{} | ||
| allowedContributors map[phase0.ValidatorIndex]map[uint64]struct{} | ||
| } | ||
|
|
||
| type syncCommitteeSubnetIDProvider interface { | ||
| SyncCommitteeSubnetID(phase0.CommitteeIndex) uint64 | ||
| } | ||
|
|
||
| func NewAggregatorCommitteeChecker( | ||
| duty *spectypes.AggregatorCommitteeDuty, | ||
| subnetProvider syncCommitteeSubnetIDProvider, | ||
| ) ValueChecker { | ||
|
Comment on lines
+90
to
+93
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Nil duty can panic.
Consider returning a non-nil checker with empty allowlists (or returning an error from the constructor) when the duty is nil. |
||
| allowedAggregators := make(map[phase0.ValidatorIndex]map[phase0.CommitteeIndex]struct{}) | ||
| allowedContributors := make(map[phase0.ValidatorIndex]map[uint64]struct{}) | ||
|
|
||
| for _, vDuty := range duty.ValidatorDuties { | ||
| switch vDuty.Type { | ||
| case spectypes.BNRoleAggregator: | ||
| if _, ok := allowedAggregators[vDuty.ValidatorIndex]; !ok { | ||
| allowedAggregators[vDuty.ValidatorIndex] = make(map[phase0.CommitteeIndex]struct{}) | ||
| } | ||
| allowedAggregators[vDuty.ValidatorIndex][(vDuty.CommitteeIndex)] = struct{}{} | ||
|
|
||
| case spectypes.BNRoleSyncCommitteeContribution: | ||
| if _, ok := allowedContributors[vDuty.ValidatorIndex]; !ok { | ||
| allowedContributors[vDuty.ValidatorIndex] = make(map[uint64]struct{}) | ||
| } | ||
| for _, index := range vDuty.ValidatorSyncCommitteeIndices { | ||
| subnet := subnetProvider.SyncCommitteeSubnetID(phase0.CommitteeIndex(index)) | ||
| allowedContributors[vDuty.ValidatorIndex][subnet] = struct{}{} | ||
| } | ||
| default: | ||
| // Other duty types are unexpected | ||
| } | ||
| } | ||
|
|
||
| func NewAggregatorCommitteeChecker() ValueChecker { | ||
| return &aggregatorCommitteeChecker{} | ||
| return &aggregatorCommitteeChecker{ | ||
| allowedAggregators: allowedAggregators, | ||
| allowedContributors: allowedContributors, | ||
| } | ||
| } | ||
|
|
||
| func (v *aggregatorCommitteeChecker) CheckValue(value []byte) error { | ||
|
|
@@ -96,6 +133,46 @@ func (v *aggregatorCommitteeChecker) CheckValue(value []byte) error { | |
| return fmt.Errorf("invalid value: %w", err) | ||
| } | ||
|
|
||
| if len(cd.Aggregators) == 0 && len(cd.Contributors) == 0 { | ||
| return spectypes.WrapError( | ||
| spectypes.AggCommConsensusDataNoValidatorErrorCode, | ||
| fmt.Errorf("no aggregators or sync committee contributors in consensus data"), | ||
| ) | ||
| } | ||
|
|
||
| for _, agg := range cd.Aggregators { | ||
| allowedByValidator, ok := v.allowedAggregators[agg.ValidatorIndex] | ||
| if !ok { | ||
| return spectypes.NewError( | ||
| spectypes.QBFTValueInvalidErrorCode, | ||
| fmt.Sprintf("unexpected aggregator validator %d", agg.ValidatorIndex), | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Hi, bringing back the divergent-views concern from the parent PR (#2503, #2503 (comment) by @MatheusFranco99). Since operators can have different views of the aggregator lists (e.g. different or failed BN responses, or some operators haven't processed a ValidatorAdded event yet), strict rejection here would stall consensus. The same issue applies to verifyExpectedSigningRoots, I think — an unknown validator's signing root causes the entire partial-sig batch to be dropped, including valid sigs for validators the operator does know about. Matheus's suggestion of rejecting only validators that are known to the operator but shouldn't have that duty, rather than simply unknown ones, seems like an OK tradeoff.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @shane-moore Hi, thanks.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Correct, we can't do validator ID checks 👍 |
||
| ) | ||
| } | ||
| if _, ok := allowedByValidator[phase0.CommitteeIndex(agg.CommitteeIndex)]; !ok { | ||
| return spectypes.NewError( | ||
| spectypes.QBFTValueInvalidErrorCode, | ||
| fmt.Sprintf("unexpected aggregator committee index %d for validator %d", agg.CommitteeIndex, agg.ValidatorIndex), | ||
| ) | ||
| } | ||
| } | ||
|
|
||
| for _, contrib := range cd.Contributors { | ||
| allowedByValidator, ok := v.allowedContributors[contrib.ValidatorIndex] | ||
| if !ok { | ||
| return spectypes.NewError( | ||
| spectypes.QBFTValueInvalidErrorCode, | ||
| fmt.Sprintf("unexpected contributor validator %d", contrib.ValidatorIndex), | ||
| ) | ||
| } | ||
| subnetID := contrib.CommitteeIndex | ||
| if _, ok := allowedByValidator[subnetID]; !ok { | ||
| return spectypes.NewError( | ||
| spectypes.QBFTValueInvalidErrorCode, | ||
| fmt.Sprintf("unexpected contributor subnet %d for validator %d", subnetID, contrib.ValidatorIndex), | ||
| ) | ||
| } | ||
| } | ||
|
Comment on lines
+142
to
+174
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Do you see any good motivation for having this check, @MatheusFranco99? We need more than
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Agree. |
||
|
|
||
| return nil | ||
| } | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Nil ValCheck until pre-consensus
`NewAggregatorCommitteeRunner` no longer initializes `ValCheck`, and it’s only set in `ProcessPreConsensus` right before `decide`. If any code path calls `ProcessConsensus`/`ProcessPostConsensus` before `ProcessPreConsensus` for the duty, `r.ValCheck.CheckValue` will panic. If out-of-order message processing is possible in this runner (as it is elsewhere), initialize
`ValCheck` in the constructor (with duty-independent checks) or guard `ProcessConsensus`/`ProcessPostConsensus` with a clear error when `r.ValCheck == nil`.