import hashlib
import threading
from collections import OrderedDict
from threading import Lock

from django.core.cache.backends.base import BaseCache
from django.core.cache.backends.locmem import LocMemCache
from django.middleware.csrf import get_token

# thread-local storage holding a reference to the request being handled
_thread_locals = threading.local()
def get_current_request():
    return getattr(_thread_locals, "request", None)


def reset_current_request():
    _thread_locals.request = None
class RequestCache(LocMemCache):
    """
    RequestCache is a customized LocMemCache which stores its data as instance
    attributes rather than in module-level globals. It is designed to live only
    as long as the request object that request_cache_middleware attaches it to.
    """

    def __init__(self):
        # We explicitly do not call super().__init__() here: we want
        # BaseCache.__init__() to run, but *not* LocMemCache.__init__(),
        # because the latter would register our cache in its globals.
        BaseCache.__init__(self, params={})
        self._cache = OrderedDict()
        self._expire_info = {}
        self._lock = Lock()
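
# A quick illustration (not part of the original code): because each
# RequestCache keeps its storage on the instance, two instances never share
# entries, unlike the stock LocMemCache whose data lives in process-wide globals.
#
#   a, b = RequestCache(), RequestCache()
#   a.set("key", 1)
#   a.get("key")  # -> 1
#   b.get("key")  # -> None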
def request_cache_middleware(get_response):
    def middleware(request):
        get_token(request)  # force CSRF token setup before the view runs
        _thread_locals.request = request
        request.cache = RequestCache()
        try:
            response = get_response(request)
        finally:
            # always release the thread-local reference so the request (and its
            # cache) does not leak into the next request handled by this thread
            reset_current_request()
        return response
    return middleware
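
# A hedged configuration sketch: the function-based middleware is registered via
# the standard MIDDLEWARE setting in settings.py. The dotted path below is an
# assumption -- adjust it to wherever this module lives in your project.
#
#   MIDDLEWARE = [
#       "django.contrib.sessions.middleware.SessionMiddleware",
#       "django.contrib.auth.middleware.AuthenticationMiddleware",
#       # ...
#       "myproject.request_cache.request_cache_middleware",
#   ]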
def get_request_cache():
    """
    Return the cache attached to the current request, or None when called
    outside the request/response cycle.
    :return: RequestCache instance or None
    """
    return getattr(get_current_request(), "cache", None)
# marker used to separate positional args from kwargs in the key; it must be a
# module-level constant so that every call reuses the same marker object
cache_args_kwargs_marker = object()


def cache_calculate_key(*args, **kwargs):
    """
    Calculate the cache key of a function call with args and kwargs.
    Adapted from the key-building approach of functools.lru_cache.
    :param args: positional arguments of the call
    :param kwargs: keyword arguments of the call
    :return: the calculated key for the function call
    :rtype: str
    """
    # combine args with kwargs, separated by the cache_args_kwargs_marker
    key = args + (cache_args_kwargs_marker,) + tuple(sorted(kwargs.items()))
    # hash the stringified call so the key stays short and free of characters
    # that Django's cache key validation would warn about
    return hashlib.md5(str(key).encode("utf-8")).hexdigest()
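
# Illustrative only (the function name and arguments are assumptions): identical
# calls always produce the same key, so they hit the same cache entry within a
# request.
#
#   cache_calculate_key("get_profile", 42, lang="fr")
#   # -> a 32-character md5 hex digest of the stringified (args, marker, kwargs) tuple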
def cache_for_request(fn):
    """
    Decorator that caches a function call (arguments and result) for the
    duration of the current request only.
    The result is stored in the memory of the current process and is destroyed
    together with the request.
    :param fn: the function to wrap
    :return: the wrapping function
    """
    def wrapper(*args, **kwargs):
        cache = get_request_cache()
        if not cache:
            # no per-request cache available -> execute the function directly
            return fn(*args, **kwargs)
        # cache available -> check if a result is already stored for this call
        key = cache_calculate_key(fn.__name__, *args, **kwargs)
        sentinel = object()
        result = cache.get(key, sentinel)
        if result is sentinel:
            # no cached result yet -> execute the function and store the result;
            # a sentinel is used so that falsy results (0, '', None, ...) are
            # also cached instead of being recomputed on every call
            result = fn(*args, **kwargs)
            cache.set(key, result)
        return result
    return wrapper
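
# A minimal usage sketch (the decorated function and its query are assumptions,
# not part of the original code): repeated calls with the same arguments during
# a single request execute the body once and then return the cached result.
#
#   @cache_for_request
#   def profile_type_for(profile_pk):
#       return UserProfile.objects.get(pk=profile_pk).profile_type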
def set_default_cache(cache, request):
    """Pre-populate the per-request cache with commonly needed user data."""
    has_ishtar_user = hasattr(request.user, "ishtaruser")
    cache.set("ishtar_user.exists", has_ishtar_user)
    if not has_ishtar_user:
        return
    cache.set("ishtar_user.pk", request.user.ishtaruser.pk)
    is_admin = request.user.ishtaruser.has_right("administrator", request.session)
    cache.set("ishtar_user.is_administrator", is_admin)
    # profile
    current_profile = request.user.ishtaruser.current_profile
    cache.set("ishtar_user.has_profile", current_profile is not None)
    if current_profile:
        cache.set("ishtar_user.profile.profile_type_pk",
                  current_profile.profile_type_id)
        cache.set("ishtar_user.profile.auto_pin",
                  current_profile.auto_pin)
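
# A possible integration point (an assumption, not shown above): set_default_cache()
# can be called from request_cache_middleware right after the per-request cache is
# created, so these values are available for the rest of the request, e.g.
#
#   request.cache = RequestCache()
#   if hasattr(request, "user") and request.user.is_authenticated:
#       set_default_cache(request.cache, request)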