@@ -21,26 +21,27 @@ type KCOVState struct {
 	cover []byte
 }
 
-// Trace invokes `f` and returns its result, as well as collected kernel
-// coverage during its invocation.
-func (st *KCOVState) Trace(f func() error) ([]uintptr, error) {
+type KCOVTraceResult struct {
+	Result   error     // Result of the call.
+	Coverage []uintptr // Collected program counters.
+}
+
+// Trace invokes `f` and returns a KCOVTraceResult.
+func (st *KCOVState) Trace(f func() error) KCOVTraceResult {
 	// First 8 bytes holds the number of collected PCs since last poll.
-	countPtr := (*uint64)(unsafe.Pointer(&st.cover[0]))
+	countPtr := (*uintptr)(unsafe.Pointer(&st.cover[0]))
 	// Reset coverage for this run.
-	atomic.StoreUint64(countPtr, 0)
+	atomic.StoreUintptr(countPtr, 0)
 	// Trigger call.
 	err := f()
 	// Load the number of PCs that were hit during trigger.
-	n := atomic.LoadUint64(countPtr)
-	if n == 0 {
-		return nil, nil
-	}
+	n := atomic.LoadUintptr(countPtr)
 
 	pcDataPtr := (*uintptr)(unsafe.Pointer(&st.cover[sizeofUintPtr]))
 	pcs := unsafe.Slice(pcDataPtr, n)
 	pcsCopy := make([]uintptr, n)
 	copy(pcsCopy, pcs)
-	return pcsCopy, err
+	return KCOVTraceResult{Result: err, Coverage: pcsCopy}
 }
 
 // EnableTracingForCurrentGoroutine prepares the current goroutine for kcov tracing.
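
For context before the next hunk, a minimal usage sketch of the reworked Trace API. It assumes the caller sits in the same package as this diff, on a Linux kernel built with CONFIG_KCOV; the traced call and logging are illustrative, not part of this change:

package main

import (
	"log"
	"os"
)

func main() {
	// Pin the goroutine to an OS thread and set up the kcov buffer.
	st, err := EnableTracingForCurrentGoroutine()
	if err != nil {
		log.Fatal(err)
	}
	defer st.DisableTracing()

	// Trace a single syscall-heavy call; any call that enters the
	// kernel would do here.
	res := st.Trace(func() error {
		_, err := os.ReadFile("/proc/self/status")
		return err
	})
	if res.Result != nil {
		log.Printf("traced call failed: %v", res.Result)
	}
	log.Printf("collected %d kernel PCs", len(res.Coverage))
}
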
@@ -53,47 +54,42 @@ func EnableTracingForCurrentGoroutine() (*KCOVState, error) {
 	if err != nil {
 		return nil, err
 	}
-
-	cleanupOnError := func() {
-		file.Close()
-		runtime.UnlockOSThread()
+	st := KCOVState{
+		file: file,
 	}
-
 	fd := file.Fd()
 
 	// Setup trace mode and size.
 	if err := unix.IoctlSetInt(int(fd), uint(KCOV_INIT_TRACE), kcovCoverSize); err != nil {
-		cleanupOnError()
+		st.DisableTracing()
 		return nil, err
 	}
 
-	// Mmap buffer shared between kernel- and user-space.
-	coverageBuffer, err := unix.Mmap(
+	// Mmap buffer shared between kernel- and user-space. For more information,
+	// see the Linux KCOV documentation: https://docs.kernel.org/dev-tools/kcov.html.
+	st.cover, err = unix.Mmap(
 		int(fd),
-		0, // offset
+		0, // Offset.
 		kcovCoverSize*sizeofUintPtr,
-		unix.PROT_READ|unix.PROT_WRITE, // a read/write mapping
-		unix.MAP_SHARED,                // changes are shared with the kernel
+		unix.PROT_READ|unix.PROT_WRITE,
+		unix.MAP_SHARED,
 	)
 	if err != nil {
-		cleanupOnError()
+		st.DisableTracing()
 		return nil, err
 	}
 
 	// Enable coverage collection on the current thread.
 	if err := unix.IoctlSetInt(int(fd), uint(KCOV_ENABLE), KCOV_TRACE_PC); err != nil {
-		cleanupOnError()
-		unix.Munmap(coverageBuffer)
+		st.DisableTracing()
 		return nil, err
 	}
 
-	return &KCOVState{
-		file:  file,
-		cover: coverageBuffer,
-	}, nil
+	return &st, nil
 }
 
 func (st *KCOVState) DisableTracing() {
 	runtime.UnlockOSThread()
 	st.file.Close()
+	unix.Munmap(st.cover)
 }
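The hunks above reference kcovCoverSize and sizeofUintPtr, which are defined elsewhere in the file. For reference only, a plausible shape of those definitions; the values are assumptions, not taken from this change:

const (
	// Assumed: capacity of the shared buffer in uintptr-sized slots. The
	// kcov docs use 64K words in their examples; the real constant in
	// this package may differ.
	kcovCoverSize = 64 << 10

	// Assumed: size in bytes of one buffer slot (8 on 64-bit Linux,
	// where the kernel writes unsigned-long-sized entries).
	sizeofUintPtr = 8
)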