@@ -2,32 +2,38 @@ package files
22
33import (
44 "context"
5- "io/fs"
6- "sync"
75 "sync/atomic"
86 "testing"
97 "time"
108
119 "github.com/google/uuid"
10+ "github.com/prometheus/client_golang/prometheus"
1211 "github.com/spf13/afero"
1312 "github.com/stretchr/testify/require"
1413 "golang.org/x/sync/errgroup"
1514
15+ "github.com/coder/coder/v2/coderd/coderdtest/promhelp"
1616 "github.com/coder/coder/v2/testutil"
1717)
1818
// cachePromMetricName returns the fully-qualified Prometheus metric name for
// the file cache by prefixing the given metric suffix with the cache's
// metric namespace ("coderd_file_cache_"). Centralizing the prefix keeps the
// metric-name assertions in these tests consistent with the names the cache
// registers.
func cachePromMetricName(metric string) string {
	return "coderd_file_cache_" + metric
}
22+
1923func TestConcurrency (t * testing.T ) {
2024 t .Parallel ()
2125
26+ const fileSize = 10
2227 emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
2328 var fetches atomic.Int64
24- c := newTestCache (func (_ context.Context , _ uuid.UUID ) (fs.FS , error ) {
29+ reg := prometheus .NewRegistry ()
30+ c := New (func (_ context.Context , _ uuid.UUID ) (cacheEntryValue , error ) {
2531 fetches .Add (1 )
2632 // Wait long enough before returning to make sure that all of the goroutines
2733 // will be waiting in line, ensuring that no one duplicated a fetch.
2834 time .Sleep (testutil .IntervalMedium )
29- return emptyFS , nil
30- })
35+ return cacheEntryValue { FS : emptyFS , size : fileSize } , nil
36+ }, reg )
3137
3238 batches := 1000
3339 groups := make ([]* errgroup.Group , 0 , batches )
@@ -55,15 +61,29 @@ func TestConcurrency(t *testing.T) {
5561 require .NoError (t , g .Wait ())
5662 }
5763 require .Equal (t , int64 (batches ), fetches .Load ())
64+
65+ // Verify all the counts & metrics are correct.
66+ require .Equal (t , batches , c .Count ())
67+ require .Equal (t , batches * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_bytes_current" ), nil ))
68+ require .Equal (t , batches * fileSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_size_bytes_total" ), nil ))
69+ require .Equal (t , batches , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
70+ require .Equal (t , batches , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_total" ), nil ))
71+ require .Equal (t , batches * batchSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
72+ require .Equal (t , batches * batchSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_file_refs_total" ), nil ))
5873}
5974
6075func TestRelease (t * testing.T ) {
6176 t .Parallel ()
6277
78+ const fileSize = 10
6379 emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
64- c := newTestCache (func (_ context.Context , _ uuid.UUID ) (fs.FS , error ) {
65- return emptyFS , nil
66- })
80+ reg := prometheus .NewRegistry ()
81+ c := New (func (_ context.Context , _ uuid.UUID ) (cacheEntryValue , error ) {
82+ return cacheEntryValue {
83+ FS : emptyFS ,
84+ size : fileSize ,
85+ }, nil
86+ }, reg )
6787
6888 batches := 100
6989 ids := make ([]uuid.UUID , 0 , batches )
@@ -73,32 +93,60 @@ func TestRelease(t *testing.T) {
7393
7494 // Acquire a bunch of references
7595 batchSize := 10
76- for _ , id := range ids {
77- for range batchSize {
96+ for openedIdx , id := range ids {
97+ for batchIdx := range batchSize {
7898 it , err := c .Acquire (t .Context (), id )
7999 require .NoError (t , err )
80100 require .Equal (t , emptyFS , it )
101+
102+ // Each time a new file is opened, the metrics should be updated as so:
103+ opened := openedIdx + 1
104+ // Number of unique files opened is equal to the idx of the ids.
105+ require .Equal (t , opened , c .Count ())
106+ require .Equal (t , opened , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
107+ // Current file size is unique files * file size.
108+ require .Equal (t , opened * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_bytes_current" ), nil ))
109+ // The number of refs is the current iteration of both loops.
110+ require .Equal (t , ((opened - 1 )* batchSize )+ (batchIdx + 1 ), promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
81111 }
82112 }
83113
84114 // Make sure cache is fully loaded
85115 require .Equal (t , len (c .data ), batches )
86116
87117 // Now release all of the references
88- for _ , id := range ids {
89- for range batchSize {
118+ for closedIdx , id := range ids {
119+ stillOpen := len (ids ) - closedIdx
120+ for closingIdx := range batchSize {
90121 c .Release (id )
122+
123+ // Each time a file is released, the metrics should decrement the file refs
124+ require .Equal (t , (stillOpen * batchSize )- (closingIdx + 1 ), promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
125+
126+ closed := closingIdx + 1 == batchSize
127+ if closed {
128+ continue
129+ }
130+
131+ // File ref still exists, so the counts should not change yet.
132+ require .Equal (t , stillOpen , c .Count ())
133+ require .Equal (t , stillOpen , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
134+ require .Equal (t , stillOpen * fileSize , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_bytes_current" ), nil ))
91135 }
92136 }
93137
94138 // ...and make sure that the cache has emptied itself.
95139 require .Equal (t , len (c .data ), 0 )
96- }
97140
98- func newTestCache (fetcher func (context.Context , uuid.UUID ) (fs.FS , error )) Cache {
99- return Cache {
100- lock : sync.Mutex {},
101- data : make (map [uuid.UUID ]* cacheEntry ),
102- fetcher : fetcher ,
103- }
141+ // Verify all the counts & metrics are correct.
142+ // All existing files are closed
143+ require .Equal (t , 0 , c .Count ())
144+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_size_bytes_current" ), nil ))
145+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_files_current" ), nil ))
146+ require .Equal (t , 0 , promhelp .GaugeValue (t , reg , cachePromMetricName ("open_file_refs_current" ), nil ))
147+
148+ // Total counts remain
149+ require .Equal (t , batches * fileSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_size_bytes_total" ), nil ))
150+ require .Equal (t , batches , promhelp .CounterValue (t , reg , cachePromMetricName ("open_files_total" ), nil ))
151+ require .Equal (t , batches * batchSize , promhelp .CounterValue (t , reg , cachePromMetricName ("open_file_refs_total" ), nil ))
104152}
0 commit comments