Merge pull request #29149 from ankush/perf/client_assets_json
perf: use client cache for assets.json
This commit is contained in:
commit
b338bdd5f2
3 changed files with 30 additions and 19 deletions
|
|
@@ -97,3 +97,17 @@ class TestClientCache(IntegrationTestCase):
|
|||
frappe.client_cache.set_value(TEST_KEY, val)
|
||||
|
||||
self.assertEqual(frappe.client_cache.get_value(TEST_KEY), frappe.cache.get_value(TEST_KEY))
|
||||
|
||||
def test_shared_keys(self):
|
||||
val = frappe.generate_hash()
|
||||
frappe.client_cache.set_value(TEST_KEY, val, shared=True)
|
||||
with self.assertRedisCallCounts(0):
|
||||
self.assertEqual(frappe.client_cache.get_value(TEST_KEY, shared=True), val)
|
||||
|
||||
def test_generator(self):
|
||||
val = frappe.generate_hash()
|
||||
with self.assertRedisCallCounts(2, exact=True):
|
||||
self.assertEqual(frappe.client_cache.get_value(TEST_KEY, generator=lambda: val), val)
|
||||
|
||||
with self.assertRedisCallCounts(0):
|
||||
self.assertEqual(frappe.client_cache.get_value(TEST_KEY, generator=lambda: val), val)
|
||||
|
|
|
|||
|
|
@@ -961,18 +961,15 @@ def get_assets_json():
|
|||
|
||||
return assets
|
||||
|
||||
if not hasattr(frappe.local, "assets_json"):
|
||||
if not frappe.conf.developer_mode:
|
||||
frappe.local.assets_json = frappe.cache.get_value(
|
||||
"assets_json",
|
||||
_get_assets,
|
||||
shared=True,
|
||||
)
|
||||
if not frappe.conf.developer_mode:
|
||||
return frappe.client_cache.get_value(
|
||||
"assets_json",
|
||||
shared=True,
|
||||
generator=_get_assets,
|
||||
)
|
||||
|
||||
else:
|
||||
frappe.local.assets_json = _get_assets()
|
||||
|
||||
return frappe.local.assets_json
|
||||
else:
|
||||
return _get_assets()
|
||||
|
||||
|
||||
def get_bench_relative_path(file_path):
|
||||
|
|
|
|||
|
|
@@ -482,11 +482,11 @@ class ClientCache:
|
|||
)
|
||||
self.invalidator_thread = self.run_invalidator_thread()
|
||||
|
||||
def get_value(self, key):
|
||||
def get_value(self, key, *, shared=False, generator=None):
|
||||
if not self.healthy:
|
||||
return self.redis.get_value(key)
|
||||
return self.redis.get_value(key, shared=shared, generator=generator)
|
||||
|
||||
key = self.redis.make_key(key)
|
||||
key = self.redis.make_key(key, shared=shared)
|
||||
try:
|
||||
val = self.cache[key]
|
||||
if time.monotonic() < val.expiry and self.healthy:
|
||||
|
|
@@ -501,7 +501,7 @@ class ClientCache:
|
|||
with self.lock:
|
||||
self.cache[key] = _PLACEHOLDER_VALUE
|
||||
|
||||
val = self.redis.get_value(key, shared=True, use_local_cache=not self.healthy)
|
||||
val = self.redis.get_value(key, shared=True, use_local_cache=not self.healthy, generator=generator)
|
||||
|
||||
# Note: We should not "cache" the cache-misses in client cache.
|
||||
# This cache is long lived and "misses" are not tracked by redis so they'll never get
|
||||
|
|
@@ -518,8 +518,8 @@ class ClientCache:
|
|||
|
||||
return val
|
||||
|
||||
def set_value(self, key, val):
|
||||
key = self.redis.make_key(key)
|
||||
def set_value(self, key, val, *, shared=False):
|
||||
key = self.redis.make_key(key, shared=shared)
|
||||
self.ensure_max_size()
|
||||
self.redis.set_value(key, val, shared=True)
|
||||
with self.lock:
|
||||
|
|
@@ -536,8 +536,8 @@ class ClientCache:
|
|||
with self.lock, suppress(RuntimeError):
|
||||
self.cache.pop(next(iter(self.cache)), None)
|
||||
|
||||
def delete_value(self, key):
|
||||
key = self.redis.make_key(key)
|
||||
def delete_value(self, key, *, shared=False):
|
||||
key = self.redis.make_key(key, shared=shared)
|
||||
self.redis.delete_value(key, shared=True)
|
||||
with self.lock:
|
||||
self.cache.pop(key, None)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue