1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
//! A cache system that uses local in-memory storage to store a limited number of items.
//!
//! The cache system here is actually two tiered. The first tier maps from
//! suggestion requests to the hash of the response they should use. The second
//! tier maps from those hashes to the responses. In this way, duplicate
//! responses can be stored only once, even if they are used for many requests.

use anyhow::anyhow;
use async_trait::async_trait;
use cadence::{CountedExt, Gauged, StatsdClient, Timed};
use deduped_dashmap::{ControlFlow, DedupedMap};
use lazy_static::lazy_static;
use merino_settings::providers::MemoryCacheConfig;
use merino_suggest_traits::{
    convert_config, metrics::TimedMicros, reconfigure_or_remake, CacheInputs, CacheStatus,
    MakeFreshType, NullProvider, SetupError, Suggestion, SuggestionProvider, SuggestionRequest,
    SuggestionResponse,
};
use std::{
    collections::HashMap,
    sync::Arc,
    time::{Duration, Instant},
};
use tracing::Instrument;

use arc_swap::ArcSwap;

lazy_static! {
    /// Global table of in-flight cache-refresh locks: maps a cache key to the
    /// `Instant` at which its lock expires. Used so that only one request at a
    /// time refreshes a given key; see the `impl LOCK_TABLE` block below.
    static ref LOCK_TABLE: ArcSwap<HashMap<String, Instant>> =
        ArcSwap::from_pointee(HashMap::new());
}

impl LOCK_TABLE {
    /// Check to see if there's any lock for a given key.
    ///
    /// A key counts as locked only while its stored deadline is still in the
    /// future; an expired entry is treated as unlocked (stale entries are
    /// cleaned up separately by `prune`).
    fn is_locked(&self, key: &str) -> bool {
        if let Some(lock_val) = self.load().get(key) {
            return *lock_val > Instant::now();
        }
        false
    }

    /// Generate a lock for the given key and timeout.
    ///
    /// Stores `now + lock_timeout` as the key's deadline and returns that
    /// `Instant`, which serves as the caller's lock token for `update`.
    fn add_lock(&self, key: &str, lock_timeout: Duration) -> Instant {
        let lock = Instant::now() + lock_timeout;
        self.rcu(|table| {
            let mut locked = HashMap::clone(table);
            locked.insert(key.to_owned(), lock);
            locked
        });
        lock
    }

    /// run func and remove lock, only if the lock we have matches what
    /// is registered for the key.
    ///
    /// More precisely, as written: `func` runs only when the registered
    /// deadline equals `lock`, but the key's entry is removed whenever it
    /// exists, matching or not.
    ///
    /// NOTE(review): removing the entry even when `*ts != lock` contradicts
    /// the "only if the lock matches" wording above — confirm which behavior
    /// is intended.
    ///
    /// NOTE(review): `ArcSwap::rcu` may re-run its closure when it loses a
    /// compare-and-swap race, so the side-effecting `func` could execute more
    /// than once — confirm callers tolerate this.
    fn update<F>(&self, key: &str, lock: Instant, mut func: F)
    where
        F: FnMut(),
    {
        self.rcu(|table| {
            let mut locked = HashMap::clone(table);
            if let Some(ts) = locked.get(key) {
                if *ts == lock {
                    func();
                }
                // Removed unconditionally once the key exists (see NOTE above).
                locked.remove(key);
            }
            locked
        });
    }

    /// remove any expired elements from the Pending table
    /// (There shouldn't be many.)
    ///
    /// Keeps only entries whose deadline is strictly later than `start`.
    fn prune(&self, start: &Instant) {
        self.rcu(|table| {
            let mut cleaned = HashMap::clone(table);
            cleaned.retain(|_k, v| *v > *start);
            cleaned
        });
    }
}

/// An in-memory cache for suggestions.
///
/// Wraps another provider and caches its responses in a two-tier
/// `DedupedMap`, keyed by the request's cache key, with each entry carrying
/// its expiration `Instant` alongside the suggestions.
pub struct Suggester {
    /// The suggester to query on cache-miss.
    inner: Box<dyn SuggestionProvider>,

    /// The Statsd client used to record statistics.
    metrics_client: StatsdClient,

    /// The cached items: cache key → (expiration instant, suggestions).
    items: Arc<DedupedMap<String, Instant, Vec<Suggestion>>>,

    /// TTL to apply to items if the underlying provider does not give one.
    default_ttl: Duration,

    /// TTL for locks on cache refresh updates.
    default_lock_timeout: Duration,

    /// Maximum number of entries to remove in a single background expiration iteration.
    max_background_removals: usize,
}

impl Suggester {
    /// Create an in-memory suggestion cache from settings that wraps `provider`.
    ///
    /// Also spawns a background task that removes expired entries from the
    /// cache once every `config.cleanup_interval`.
    #[must_use]
    pub fn new_boxed(
        config: MemoryCacheConfig,
        provider: Box<dyn SuggestionProvider>,
        metrics_client: StatsdClient,
    ) -> Box<Self> {
        let suggester = Self {
            inner: provider,
            metrics_client,
            items: Arc::new(DedupedMap::new()),
            default_ttl: config.default_ttl,
            default_lock_timeout: config.default_lock_timeout,
            max_background_removals: config.max_removed_entries,
        };

        {
            // The cleanup task never queries the inner provider, so it only
            // needs a partial clone with a `NullProvider` stand-in.
            let cloned_suggester = suggester.clone_with_null_provider();
            let task_interval = config.cleanup_interval;
            tokio::spawn(async move {
                let mut timer = tokio::time::interval(task_interval);
                // The timer fires immediately, but we don't want to run the
                // cleanup function immediately, so wait one tick before
                // starting the loop.
                timer.tick().await;
                loop {
                    timer.tick().await;
                    let mut cache = cloned_suggester.clone_with_null_provider();

                    // Dispatch the expiry task to the blocking threads of the
                    // runtime. This prevents the expiry task, which is inherently
                    // blocking, from blocking the other tasks running on the
                    // core threads of the runtime.
                    //
                    // Wait for the task to finish so that only one expiry task
                    // is allowed to run.
                    let _ = tokio::task::spawn_blocking(move || {
                        cache.remove_expired_entries();
                    })
                    .await;
                }
            });
        }

        Box::new(suggester)
    }

    /// Remove expired entries from `items`, stopping after at most
    /// `self.max_background_removals` removals per invocation, then report
    /// the cache sizes and elapsed time as metrics.
    #[tracing::instrument(level = "debug", skip(self))]
    fn remove_expired_entries(&mut self) {
        let start = Instant::now();
        let count_before_storage = self.items.len_storage();
        let count_before_pointers = self.items.len_pointers();

        // Retain all cache entries that have not yet expired.
        let mut num_removals = 0;
        self.items.retain(|_key, expiration, _suggestions| {
            // Use `>=` so that no more than `max_background_removals` entries
            // are removed in one pass. (The previous `>` comparison allowed
            // one extra removal past the configured limit.)
            if num_removals >= self.max_background_removals {
                tracing::warn!(
                    r#type = "cache.memory.max-removals",
                    ?self.max_background_removals,
                    "memory-cache cleanup reached max number of removed entries"
                );
                return ControlFlow::Break;
            }

            let should_remove = *expiration < start;
            if should_remove {
                num_removals += 1;
            }
            ControlFlow::Continue(!should_remove)
        });

        // Drop any refresh locks whose deadlines have passed.
        LOCK_TABLE.prune(&start);

        // Report finishing.
        let duration = Instant::now() - start;
        let removed_storage = count_before_storage - self.items.len_storage();
        let removed_pointers = count_before_pointers - self.items.len_pointers();
        tracing::info!(
            r#type = "cache.memory.remove-expired",
            ?duration,
            ?removed_pointers,
            ?removed_storage,
            "finished removing expired entries from cache"
        );

        self.metrics_client
            .gauge("cache.memory.storage-len", self.items.len_storage() as u64)
            .ok();
        self.metrics_client
            .gauge(
                "cache.memory.pointers-len",
                self.items.len_pointers() as u64,
            )
            .ok();
        self.metrics_client
            .time("cache.memory.duration", duration)
            .ok();
    }

    /// It is often useful to have access to most of the fields on this object,
    /// but providers are not generally cloneable, so `self.inner` cannot be
    /// cloned.
    ///
    /// Introducing an Arc around `inner` complicates things, but the situations
    /// where we want clones of this object don't need the inner provider. So
    /// perform a partial clone that clones most fields, replacing the provider
    /// with a dummy value.
    fn clone_with_null_provider(&self) -> Self {
        Self {
            inner: Box::new(NullProvider),
            metrics_client: self.metrics_client.clone(),
            items: self.items.clone(),
            default_ttl: self.default_ttl,
            default_lock_timeout: self.default_lock_timeout,
            max_background_removals: self.max_background_removals,
        }
    }
}

#[async_trait]
impl SuggestionProvider for Suggester {
    /// This provider's name, wrapping the inner provider's name.
    fn name(&self) -> String {
        format!("MemoryCache({})", self.inner.name())
    }

    /// Cache-input computation is delegated to the wrapped provider.
    fn cache_inputs(&self, req: &SuggestionRequest, cache_inputs: &mut dyn CacheInputs) {
        self.inner.cache_inputs(req, cache_inputs);
    }

    /// Serve a suggestion request from the in-memory cache, falling back to
    /// the inner provider on a miss or expired entry.
    ///
    /// On a miss, a lock is registered in `LOCK_TABLE` so concurrent requests
    /// for the same key don't all query the inner provider; while a key is
    /// locked, other requests receive an empty response.
    async fn suggest(
        &self,
        query: SuggestionRequest,
    ) -> Result<SuggestionResponse, merino_suggest_traits::SuggestError> {
        let now = Instant::now();
        let key = self.cache_key(&query);
        let span = tracing::debug_span!("memory-suggest", ?key);

        // closure for `span`.
        async move {
            tracing::debug!("suggesting with memory cache");
            let mut rv = None;

            match self.items.get(&key) {
                // Expired entry: evict it and fall through to the miss path.
                Some((expiration, _)) if expiration <= now => {
                    tracing::debug!("cache expired");
                    self.items.remove(key.clone());
                }
                // Fresh entry: answer from cache with the remaining TTL.
                Some((expiration, suggestions)) => {
                    tracing::debug!("cache hit");
                    self.metrics_client.incr("cache.memory.hit").ok();
                    rv = Some(SuggestionResponse {
                        cache_status: CacheStatus::Hit,
                        cache_ttl: Some(expiration - now),
                        suggestions,
                    });
                }
                None => {
                    tracing::debug!("cache miss");
                    self.metrics_client.incr("cache.memory.miss").ok();
                }
            }

            if rv.is_none() {
                if LOCK_TABLE.is_locked(&key) {
                    // There's a fetch already in progress. Return empty for now.
                    // NOTE(review): this reports `CacheStatus::Hit` for an empty
                    // placeholder response, which inflates hit metrics — confirm
                    // this is intended.
                    rv = Some(SuggestionResponse {
                        cache_status: CacheStatus::Hit,
                        cache_ttl: None,
                        suggestions: Vec::new(),
                    });
                } else {
                    // Handle cache miss or stale cache.
                    let lock = LOCK_TABLE.add_lock(&key, self.default_lock_timeout);
                    let mut response = self
                        .inner
                        .suggest(query)
                        .await?
                        .with_cache_status(CacheStatus::Miss);

                    // Insert into the cache only while our lock token is still
                    // the one registered for this key. The TTL falls back to
                    // `default_ttl` when the inner provider supplies none.
                    LOCK_TABLE.update(&key, lock, || {
                        // Update the cache data.
                        let cache_ttl = response.cache_ttl.get_or_insert(self.default_ttl);
                        let expiration = now + *cache_ttl;
                        tracing::debug!(?now, ?expiration, "inserting into cache");
                        self.items
                            .insert(key.clone(), expiration, response.suggestions.clone());
                    });

                    rv = Some(response);
                }
            }

            if let Some(response) = rv {
                // Record total lookup duration, tagged by cache outcome.
                self.metrics_client
                    .time_micros_with_tags("cache.memory.duration-us", now.elapsed())
                    .with_tag("cache-status", response.cache_status.to_string().as_str())
                    .send();
                Ok(response)
            } else {
                Err(merino_suggest_traits::SuggestError::Internal(anyhow!(
                    "No result generated"
                )))
            }
        }
        .instrument(span)
        .await
    }

    /// Apply a new `MemoryCacheConfig`: update this cache's own settings, then
    /// reconfigure (or remake) the wrapped provider.
    async fn reconfigure(
        &mut self,
        new_config: serde_json::Value,
        make_fresh: &MakeFreshType,
    ) -> Result<(), SetupError> {
        let new_config: MemoryCacheConfig = convert_config(new_config)?;

        self.default_ttl = new_config.default_ttl;
        self.default_lock_timeout = new_config.default_lock_timeout;
        self.max_background_removals = new_config.max_removed_entries;

        reconfigure_or_remake(&mut self.inner, *new_config.inner, make_fresh).await?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::{Suggester, LOCK_TABLE};
    use cadence::{SpyMetricSink, StatsdClient};
    use deduped_dashmap::DedupedMap;
    use fake::{Fake, Faker};
    use merino_settings::providers::{MemoryCacheConfig, SuggestionProviderConfig};
    use merino_suggest_traits::{MakeFreshType, NullProvider, Suggestion, SuggestionProvider};
    use std::{
        sync::Arc,
        time::{Duration, Instant},
    };

    #[test]
    fn cache_maintainer_removes_expired_entries() {
        let items: Arc<DedupedMap<String, Instant, Vec<Suggestion>>> = Arc::new(DedupedMap::new());
        let fake_suggestions: Vec<Suggestion> = vec![Faker.fake()];

        // One entry that expired five minutes ago, one valid for five more.
        let past = Instant::now() - Duration::from_secs(300);
        let future = Instant::now() + Duration::from_secs(300);
        items.insert("expired".to_string(), past, fake_suggestions.clone());
        items.insert("current".to_string(), future, fake_suggestions);

        // Both keys share one deduplicated storage entry.
        assert_eq!(items.len_storage(), 1);
        assert_eq!(items.len_pointers(), 2);
        assert!(items.contains_key(&"current".to_owned()));
        assert!(items.contains_key(&"expired".to_owned()));

        // Capture emitted metrics so they can be inspected afterwards.
        let (receiver, spy_sink) = SpyMetricSink::new();
        let mut suggester = Suggester {
            inner: Box::new(NullProvider),
            metrics_client: StatsdClient::from_sink("merino-test", spy_sink),
            items: Arc::clone(&items),
            default_ttl: Duration::from_secs(30),
            default_lock_timeout: Duration::from_secs(1),
            max_background_removals: usize::MAX,
        };

        suggester.remove_expired_entries();

        // Only the unexpired entry should remain.
        assert_eq!(items.len_storage(), 1);
        assert_eq!(items.len_pointers(), 1);
        assert!(items.contains_key(&"current".to_owned()));
        assert!(!items.contains_key(&"expired".to_owned()));

        // Exactly three metrics should have been emitted, in order.
        assert_eq!(receiver.len(), 3);
        let collected_data: Vec<String> = receiver
            .try_iter()
            .map(|x| String::from_utf8(x).unwrap())
            .collect();

        dbg!(&collected_data);

        assert_eq!(
            collected_data[0],
            "merino-test.cache.memory.storage-len:1|g"
        );
        assert_eq!(
            collected_data[1],
            "merino-test.cache.memory.pointers-len:1|g"
        );
        assert!(collected_data[2].starts_with("merino-test.cache.memory.duration:"));
        assert!(collected_data[2].ends_with("|ms"));
    }

    #[test]
    fn cache_lock_test() {
        let key = "testLock";
        let other_key = "otherLock";
        let timeout = Duration::from_secs(3);

        let token = LOCK_TABLE.add_lock(key, timeout);
        LOCK_TABLE.add_lock(other_key, timeout);
        let mut ran = false;

        assert!(LOCK_TABLE.is_locked(key));
        assert!(!LOCK_TABLE.is_locked("unlocked"));

        // Matching token: the closure runs and the lock is released.
        LOCK_TABLE.update(key, token, || ran = true);
        assert!(ran);
        assert!(!LOCK_TABLE.is_locked(key));

        // Lock already dismissed: the closure must not run again.
        LOCK_TABLE.update(key, token, || ran = false);
        assert!(ran);

        // Token doesn't match the other key's lock: the closure must not run.
        LOCK_TABLE.update(other_key, token, || ran = false);
        assert!(ran);
    }

    #[tokio::test]
    async fn test_reconfigure() {
        // A throwaway metrics sink; this test never inspects metrics.
        let (_, sink) = SpyMetricSink::new();
        let items: Arc<DedupedMap<String, Instant, Vec<Suggestion>>> = Arc::new(DedupedMap::new());

        let mut provider = Suggester {
            inner: Box::new(NullProvider),
            metrics_client: StatsdClient::from_sink("merino-test", sink),
            items,
            default_ttl: Duration::from_secs(30),
            default_lock_timeout: Duration::from_secs(1),
            max_background_removals: usize::MAX,
        };

        // `NullProvider::reconfigure()` always succeeds, so this callback is
        // never invoked.
        let make_fresh: MakeFreshType = Box::new(move |_fresh_config: SuggestionProviderConfig| {
            unreachable!();
        });

        let default = MemoryCacheConfig::default();
        let value =
            serde_json::to_value(MemoryCacheConfig::default()).expect("failed to serialize");
        provider
            .reconfigure(value, &make_fresh)
            .await
            .expect("failed to reconfigure");

        assert_eq!(provider.default_ttl, default.default_ttl);
        assert_eq!(provider.default_lock_timeout, default.default_lock_timeout);
        assert_eq!(
            provider.max_background_removals,
            default.max_removed_entries
        );
    }
}