3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -3,6 +3,9 @@
[Unreleased]

* [BUGFIX] Reject object, interface, and input object type definitions that declare zero fields/input values (spec compliance).
* [IMPROVEMENT] Optimize overlapping field validation to avoid quadratic memory blowups on large sibling field lists.
* [FEATURE] Add a configurable safety valve for the overlapping field comparison count via the `OverlapValidationLimit(n)` schema option (0 disables the cap). When the cap is exceeded, validation aborts early with the rule `OverlapValidationLimitExceeded`. Disabled by default.
* [TEST] Add benchmarks and a randomized overlap stress test for mixed field/fragment patterns.

[v1.7.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.7.0) Release v1.7.0

1 change: 1 addition & 0 deletions README.md
@@ -153,6 +153,7 @@ schema := graphql.MustParseSchema(sdl, &RootResolver{}, nil)
- `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`.
- `DisableIntrospection()` disables introspection queries.
- `DisableFieldSelections()` disables capturing child field selections used by helper APIs (see below).
- `OverlapValidationLimit(n int)` sets a hard cap on the number of overlapping selection pairs examined during validation; exceeding it emits an `OverlapValidationLimitExceeded` error (see the sketch below).
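
A minimal sketch of wiring the option in, assuming the `sdl` string and `RootResolver` type from the schema example above; the limit value is arbitrary:

```go
// Cap overlap validation at 100,000 examined pairs; 0 (the default) leaves it uncapped.
schema := graphql.MustParseSchema(sdl, &RootResolver{},
	graphql.OverlapValidationLimit(100000),
)
```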

### Field Selection Inspection Helpers

13 changes: 11 additions & 2 deletions graphql.go
@@ -86,6 +86,7 @@ type Schema struct {
subscribeResolverTimeout time.Duration
useFieldResolvers bool
disableFieldSelections bool
overlapPairLimit int
}

// AST returns the abstract syntax tree of the GraphQL schema definition.
@@ -152,6 +153,14 @@ func MaxQueryLength(n int) SchemaOpt {
}
}

// OverlapValidationLimit caps the number of overlapping selection pairs that will be examined
// during validation of a single operation (including fragments). A value of 0 disables the cap.
// When the cap is exceeded, validation aborts early with an error (rule: OverlapValidationLimitExceeded)
// to protect against maliciously constructed queries designed to exhaust memory/CPU.
func OverlapValidationLimit(n int) SchemaOpt {
return func(s *Schema) { s.overlapPairLimit = n }
}

// Tracer is used to trace queries and fields. It defaults to [noop.Tracer].
func Tracer(t tracer.Tracer) SchemaOpt {
return func(s *Schema) {
@@ -247,7 +256,7 @@ func (s *Schema) ValidateWithVariables(queryString string, variables map[string]
return []*errors.QueryError{errors.Errorf("executable document must contain at least one operation")}
}

return validation.Validate(s.schema, doc, variables, s.maxDepth)
return validation.Validate(s.schema, doc, variables, s.maxDepth, s.overlapPairLimit)
}

// Exec executes the given query with the schema's resolver. It panics if the schema was created
@@ -270,7 +279,7 @@ func (s *Schema) exec(ctx context.Context, queryString string, operationName str
}

validationFinish := s.validationTracer.TraceValidation(ctx)
errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
errs := validation.Validate(s.schema, doc, variables, s.maxDepth, s.overlapPairLimit)
validationFinish(errs)
if len(errs) != 0 {
return &Response{Errors: errs}
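
Below is a speculative sketch of how a caller could use the new rule name to reject abusive queries before execution; it assumes the existing `Schema.Validate` helper and the exported `Rule` field on `*errors.QueryError` (both visible in the tests further down), and the wrapping is illustrative. For a sense of scale, a cap of 10,000 pairs is crossed by roughly 142 sibling fields sharing a single response name (142·141/2 ≈ 10,011 comparisons).

```go
import (
	"fmt"

	"github.com/graph-gophers/graphql-go"
)

// validateOnly runs validation (including the overlap pair cap configured on the schema)
// without executing the query, and surfaces the new rule as a distinct error.
func validateOnly(schema *graphql.Schema, query string) error {
	for _, qe := range schema.Validate(query) {
		if qe.Rule == "OverlapValidationLimitExceeded" {
			return fmt.Errorf("query rejected as too expensive to validate: %w", qe)
		}
	}
	return nil
}
```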
85 changes: 85 additions & 0 deletions internal/validation/overlap_fuzz_test.go
@@ -0,0 +1,85 @@
package validation_test

import (
"math/rand"
"testing"
"time"

"github.com/graph-gophers/graphql-go/internal/query"
"github.com/graph-gophers/graphql-go/internal/schema"
v "github.com/graph-gophers/graphql-go/internal/validation"
)

// FuzzValidateOverlapMixed exercises the overlap validation logic with randomly generated queries
// containing many sibling fields and fragment spreads to ensure it does not panic or explode in memory.
// It uses a modest overlap pair cap to keep each iteration bounded.
func FuzzValidateOverlapMixed(f *testing.F) {
baseQueries := []string{
"query{root{id}}",
"query Q{root{id name}}",
}
for _, q := range baseQueries {
f.Add(q)
}

s := schema.New()
if err := schema.Parse(s, `schema{query:Query} type Query{root: Thing} type Thing { id: ID name: String value: String }`, false); err != nil {
f.Fatal(err)
}

randSource := rand.New(rand.NewSource(time.Now().UnixNano()))

f.Fuzz(func(t *testing.T, seed string) {
// Derive an RNG from the seed length plus the shared source; generated query complexity stays bounded.
r := rand.New(rand.NewSource(int64(len(seed)) + randSource.Int63()))
fieldCount := 50 + r.Intn(150) // 50-199
fragCount := 1 + r.Intn(5)

// Build fragments.
fragBodies := make([]string, fragCount)
for i := 0; i < fragCount; i++ {
// each fragment gets a subset of fields
var body string
innerFields := 5 + r.Intn(20)
for j := 0; j < innerFields; j++ {
body += " f" + nameIdx(r.Intn(500)) + ":id"
}
fragBodies[i] = "fragment F" + nameIdx(i) + " on Thing{" + body + " }"
}

// Root selection
sel := "query{root{"
for i := 0; i < fieldCount; i++ {
sel += " a" + nameIdx(r.Intn(1000)) + ":id"
}
// Sprinkle fragment spreads
for i := 0; i < fragCount; i++ {
sel += " ...F" + nameIdx(i)
}
sel += "}}"
queryText := sel
for _, fb := range fragBodies {
queryText += fb
}

doc, err := query.Parse(queryText)
if err != nil {
return // parser fuzzing is not our goal here
}
if len(doc.Operations) == 0 {
return
}
// Use overlap limit to bound cost.
errs := v.Validate(s, doc, nil, 0, 10_000)
// The call must not panic (implicit); as a sanity check, the error slice must also stay within a reasonable bound.
if len(errs) > 1000 {
t.Fatalf("too many errors: %d", len(errs))
}
})
}

func nameIdx(i int) string {
const letters = "abcdefghijklmnopqrstuvwxyz"
if i < len(letters) {
return string(letters[i])
}
return string(letters[i%len(letters)]) + nameIdx(i/len(letters))
}
4 changes: 2 additions & 2 deletions internal/validation/validate_max_depth_test.go
@@ -83,7 +83,7 @@ func (tc maxDepthTestCase) Run(t *testing.T, s *ast.Schema) {
t.Fatal(qErr)
}

errs := Validate(s, doc, nil, tc.depth)
errs := Validate(s, doc, nil, tc.depth, 0)
if len(tc.expectedErrors) > 0 {
if len(errs) > 0 {
for _, expected := range tc.expectedErrors {
@@ -489,7 +489,7 @@ func TestMaxDepthValidation(t *testing.T) {
t.Fatal(err)
}

context := newContext(s, doc, tc.maxDepth)
context := newContext(s, doc, tc.maxDepth, 0)
op := doc.Operations[0]

opc := &opContext{context: context, ops: doc.Operations}
171 changes: 154 additions & 17 deletions internal/validation/validation.go
@@ -26,14 +26,17 @@ type fieldInfo struct {
}

type context struct {
schema *ast.Schema
doc *ast.ExecutableDefinition
errs []*errors.QueryError
opErrs map[*ast.OperationDefinition][]*errors.QueryError
usedVars map[*ast.OperationDefinition]varSet
fieldMap map[*ast.Field]fieldInfo
overlapValidated map[selectionPair]struct{}
maxDepth int
schema *ast.Schema
doc *ast.ExecutableDefinition
errs []*errors.QueryError
opErrs map[*ast.OperationDefinition][]*errors.QueryError
usedVars map[*ast.OperationDefinition]varSet
fieldMap map[*ast.Field]fieldInfo
overlapValidated map[selectionPair]struct{}
maxDepth int
overlapPairLimit int
overlapPairsObserved int
overlapLimitHit bool
}

func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
@@ -53,7 +56,7 @@ type opContext struct {
ops []*ast.OperationDefinition
}

func newContext(s *ast.Schema, doc *ast.ExecutableDefinition, maxDepth int) *context {
func newContext(s *ast.Schema, doc *ast.ExecutableDefinition, maxDepth int, overlapPairLimit int) *context {
return &context{
schema: s,
doc: doc,
Expand All @@ -62,11 +65,12 @@ func newContext(s *ast.Schema, doc *ast.ExecutableDefinition, maxDepth int) *con
fieldMap: make(map[*ast.Field]fieldInfo),
overlapValidated: make(map[selectionPair]struct{}),
maxDepth: maxDepth,
overlapPairLimit: overlapPairLimit,
}
}

func Validate(s *ast.Schema, doc *ast.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError {
c := newContext(s, doc, maxDepth)
func Validate(s *ast.Schema, doc *ast.ExecutableDefinition, variables map[string]interface{}, maxDepth int, overlapPairLimit int) []*errors.QueryError {
c := newContext(s, doc, maxDepth, overlapPairLimit)

opNames := make(nameSet, len(doc.Operations))
fragUsedBy := make(map[*ast.FragmentDefinition][]*ast.OperationDefinition)
@@ -303,13 +307,76 @@ func validateMaxDepth(c *opContext, sels []ast.Selection, visited map[*ast.Fragm
}

func validateSelectionSet(c *opContext, sels []ast.Selection, t ast.NamedType) {
if len(sels) == 0 {
return
}

// First pass: validate each selection and bucket fields by response name (alias or name).
fieldGroups := make(map[string][]ast.Selection)
var fragments []ast.Selection // fragment spreads & inline fragments
for _, sel := range sels {
if c.overlapLimitHit {
return
}
validateSelection(c, sel, t)
switch s := sel.(type) {
case *ast.Field:
name := s.Alias.Name
if name == "" {
name = s.Name.Name
}
fieldGroups[name] = append(fieldGroups[name], sel)
default:
fragments = append(fragments, sel)
}
}

for i, a := range sels {
for _, b := range sels[i+1:] {
c.validateOverlap(a, b, nil, nil)
// Compare fields only within the same response-name group (previously O(n^2) across all fields).
for _, group := range fieldGroups {
if c.overlapLimitHit {
break
}
if len(group) < 2 {
continue
}
for i, a := range group {
if c.overlapLimitHit {
break
}
for _, b := range group[i+1:] {
if c.overlapLimitHit {
break
}
c.validateOverlap(a, b, nil, nil)
}
}
}

// Fragments can introduce any field names, so we must compare them with all fields and each other.
if len(fragments) > 0 && !c.overlapLimitHit {
// Flatten fields for fragment comparison.
var allFields []ast.Selection
for _, group := range fieldGroups {
allFields = append(allFields, group...)
}
for i, fa := range fragments {
if c.overlapLimitHit {
break
}
// Compare fragment with all fields
for _, fld := range allFields {
if c.overlapLimitHit {
break
}
c.validateOverlap(fa, fld, nil, nil)
}
// Compare fragment with following fragments
for _, fb := range fragments[i+1:] {
if c.overlapLimitHit {
break
}
c.validateOverlap(fa, fb, nil, nil)
}
}
}
}
@@ -523,11 +590,38 @@ func (c *context) validateOverlap(a, b ast.Selection, reasons *[]string, locs *[
return
}

if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
// Optimisation 1: store only one direction of the pair to halve memory and lookups.
pa := reflect.ValueOf(a).Pointer()
pb := reflect.ValueOf(b).Pointer()
if pb < pa { // canonical ordering
a, b = b, a
}
key := selectionPair{a: a, b: b}
if _, ok := c.overlapValidated[key]; ok {
return
}
c.overlapValidated[selectionPair{a, b}] = struct{}{}
c.overlapValidated[selectionPair{b, a}] = struct{}{}
c.overlapValidated[key] = struct{}{}

if c.overlapPairLimit > 0 && !c.overlapLimitHit {
c.overlapPairsObserved++
if c.overlapPairsObserved > c.overlapPairLimit {
c.overlapLimitHit = true
// determine a representative location for error reporting
var loc errors.Location
switch sel := a.(type) {
case *ast.Field:
loc = sel.Alias.Loc
case *ast.InlineFragment:
loc = sel.Loc
case *ast.FragmentSpread:
loc = sel.Loc
default:
// leave zero value
}
c.addErr(loc, "OverlapValidationLimitExceeded", "Overlapping field validation aborted after examining %d pairs (limit %d). Consider restructuring the query or increasing the limit.", c.overlapPairsObserved-1, c.overlapPairLimit)
return
}
}

switch a := a.(type) {
case *ast.Field:
@@ -608,11 +702,54 @@ func (c *context) validateFieldOverlap(a, b *ast.Field) ([]string, []errors.Loca

var reasons []string
var locs []errors.Location

// Fast-path: if either side has no subselections we are done.
if len(a.SelectionSet) == 0 || len(b.SelectionSet) == 0 {
return nil, nil
}

// Optimisation 2: avoid O(m*n) cartesian product for large sibling lists with mostly
// distinct response names (common & exploitable for DoS). Instead, index B's field
// selections by response name (alias/name). For each field in A we only compare
// against fields in B with the same response name plus all fragment spreads / inline
// fragments (which can expand to any field names and must be compared exhaustively).
bFieldIndex := make(map[string][]ast.Selection, len(b.SelectionSet))
var bNonField []ast.Selection
for _, bs := range b.SelectionSet {
if f, ok := bs.(*ast.Field); ok {
name := f.Alias.Name
if name == "" { // alias may be empty, fall back to field name
name = f.Name.Name
}
bFieldIndex[name] = append(bFieldIndex[name], bs)
continue
}
bNonField = append(bNonField, bs)
}

for _, a2 := range a.SelectionSet {
if af, ok := a2.(*ast.Field); ok {
name := af.Alias.Name
if name == "" {
name = af.Name.Name
}
// Compare only against same-name fields + all non-field selections.
if matches := bFieldIndex[name]; len(matches) != 0 {
for _, bMatch := range matches {
c.validateOverlap(a2, bMatch, &reasons, &locs)
}
}
for _, bnf := range bNonField {
c.validateOverlap(a2, bnf, &reasons, &locs)
}
continue
}
// For fragments / inline fragments we still need to compare against every selection in B.
for _, b2 := range b.SelectionSet {
c.validateOverlap(a2, b2, &reasons, &locs)
}
}

return reasons, locs
}

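
For intuition about what the grouping buys: bucketing by response name makes large numbers of distinct sibling aliases cheap, while the pair cap covers the remaining worst case of many siblings sharing one response name. A hypothetical helper (not part of this PR) that produces such a worst-case query:

```go
import "strings"

// duplicateSiblingQuery builds a query whose root selection repeats the same field n times.
// All n selections share the response name "id", so overlap validation must still examine
// n*(n-1)/2 pairs; n = 200 yields 19,900 pairs and would exceed a 10,000-pair cap.
func duplicateSiblingQuery(n int) string {
	var b strings.Builder
	b.WriteString("query{root{")
	for i := 0; i < n; i++ {
		b.WriteString(" id")
	}
	b.WriteString("}}")
	return b.String()
}
```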
2 changes: 1 addition & 1 deletion internal/validation/validation_test.go
@@ -88,7 +88,7 @@ func TestValidate(t *testing.T) {
if err != nil {
t.Fatalf("failed to parse query: %s", err)
}
errs := validation.Validate(schemas[test.Schema], d, test.Vars, 0)
errs := validation.Validate(schemas[test.Schema], d, test.Vars, 0, 0)
got := []*errors.QueryError{}
for _, err := range errs {
if err.Rule == test.Rule {