Initial YakPanel commit
This commit is contained in:
23
class/cachelib/__init__.py
Normal file
23
class/cachelib/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
# -*- coding: utf-8 -*-
"""Public entry point for the cachelib package.

Re-exports every cache backend so callers can write
``from cachelib import RedisCache`` instead of importing submodules.
"""
from cachelib.base import BaseCache, NullCache
from cachelib.simple import SimpleCache
# NOTE: the module name "session_simpile" is (mis)spelled this way on disk.
from cachelib.session_simpile import SimpleCacheSession
from cachelib.file import FileSystemCache
from cachelib.memcached import MemcachedCache
from cachelib.redis import RedisCache
from cachelib.uwsgi import UWSGICache

# Names exported by ``from cachelib import *``.
__all__ = [
    'BaseCache',
    'NullCache',
    'SimpleCache',
    'FileSystemCache',
    'MemcachedCache',
    'RedisCache',
    'UWSGICache',
    'SimpleCacheSession'
]

__version__ = '0.1'
__author__ = 'Pallets Team'
|
||||
26
class/cachelib/_compat.py
Normal file
26
class/cachelib/_compat.py
Normal file
@@ -0,0 +1,26 @@
|
||||
# flake8: noqa
"""Python 2/3 compatibility shims used across cachelib."""
import sys

# True when running under Python 2.
PY2 = sys.version_info[0] == 2

if PY2:
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    # dict.iteritems() exists only on Python 2.
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Py2 native string is bytes: encode unicode, pass str/None through.
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)

else:
    text_type = str
    string_types = (str, )
    integer_types = (int, )
    # Wrap dict.items() in iter() for parity with the Py2 iteritems above.
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        # Py3 native string is str: decode bytes, pass str/None through.
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)
|
||||
198
class/cachelib/base.py
Normal file
198
class/cachelib/base.py
Normal file
@@ -0,0 +1,198 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from cachelib._compat import iteritems
|
||||
|
||||
|
||||
def _items(mappingorseq):
|
||||
"""Wrapper for efficient iteration over mappings represented by dicts
|
||||
or sequences::
|
||||
|
||||
>>> for k, v in _items((i, i*i) for i in xrange(5)):
|
||||
... assert k*k == v
|
||||
|
||||
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
|
||||
... assert k*k == v
|
||||
|
||||
"""
|
||||
if hasattr(mappingorseq, 'items'):
|
||||
return iteritems(mappingorseq)
|
||||
return mappingorseq
|
||||
|
||||
|
||||
class BaseCache(object):

    """Common interface implemented by every cache backend.

    Subclasses override the storage methods below; the defaults here are
    no-ops that report success, so the class also behaves like a null
    backend on its own.

    :param default_timeout: the default timeout (in seconds) that is used if
                            no timeout is specified on :meth:`set`. A timeout
                            of 0 indicates that the cache never expires.
    """

    def __init__(self, default_timeout=300):
        self.default_timeout = default_timeout

    def _normalize_timeout(self, timeout):
        # Substitute the configured default when no timeout was supplied.
        if timeout is None:
            return self.default_timeout
        return timeout

    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        return None

    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        return True

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key an item in the list is created::

            foo, bar = cache.get_many("foo", "bar")

        Has the same error handling as :meth:`get`.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return list(map(self.get, keys))

    def get_dict(self, *keys):
        """Like :meth:`get_many` but return a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        values = self.get_many(*keys)
        return dict(zip(keys, values))

    def set(self, key, value, timeout=None):
        """Add a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        return True

    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        return True

    def set_many(self, mapping, timeout=None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Whether all given keys have been set.
        :rtype: boolean
        """
        # Every pair is attempted even after a failure; the list
        # comprehension (unlike a short-circuiting generator) guarantees it.
        results = [self.set(key, value, timeout)
                   for key, value in _items(mapping)]
        return all(results)

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        :returns: Whether all given keys have been deleted.
        :rtype: boolean
        """
        # Stops at the first failed delete (same short-circuit behavior
        # as ``all()`` over a generator).
        for key in keys:
            if not self.delete(key):
                return False
        return True

    def has(self, key):
        """Checks if a key exists in the cache without returning it. This is a
        cheap operation that bypasses loading the actual data on the backend.

        This method is optional and may not be implemented on all caches.

        :param key: the key to check
        """
        raise NotImplementedError(
            '%s doesn\'t have an efficient implementation of `has`. That '
            'means it is impossible to check whether a key exists without '
            'fully loading the key\'s data. Consider using `self.get` '
            'explicitly if you don\'t care about performance.'
        )

    def clear(self):
        """Clears the cache. Keep in mind that not all caches support
        completely clearing the cache.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return True

    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`. If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        :returns: The new value or ``None`` for backend errors.
        """
        current = self.get(key) or 0
        new_value = current + delta
        if self.set(key, new_value):
            return new_value
        return None

    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`. If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        :returns: The new value or `None` for backend errors.
        """
        current = self.get(key) or 0
        new_value = current - delta
        if self.set(key, new_value):
            return new_value
        return None
|
||||
|
||||
|
||||
class NullCache(BaseCache):

    """A backend that never stores anything -- handy for unit testing.

    :param default_timeout: a dummy parameter that is ignored but exists
                            for API compatibility with other caches.
    """

    def has(self, key):
        # Nothing is ever cached, so no key can exist.
        return False
|
||||
190
class/cachelib/file.py
Normal file
190
class/cachelib/file.py
Normal file
@@ -0,0 +1,190 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import errno
|
||||
import tempfile
|
||||
from hashlib import md5
|
||||
from time import time
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError: # pragma: no cover
|
||||
import pickle
|
||||
|
||||
from cachelib.base import BaseCache
|
||||
from cachelib._compat import text_type
|
||||
|
||||
|
||||
class FileSystemCache(BaseCache):

    """A cache that stores the items on the file system. This cache depends
    on being the only user of the `cache_dir`. Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    Each entry is a file containing two pickles back to back: first the
    absolute expiry timestamp (``0`` meaning "never"), then the value.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some. A threshold value of 0
                      indicates no threshold.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'
    #: keep amount of files in a cache element
    _fs_count_file = '__wz_cache_count'

    def __init__(self, cache_dir, threshold=500, default_timeout=300,
                 mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode

        try:
            os.makedirs(self._path)
        except OSError as ex:
            # An already-existing cache directory is fine; any other error
            # (permissions, invalid path) is re-raised.
            if ex.errno != errno.EEXIST:
                raise

        # If there are many files and a zero threshold,
        # the list_dir can slow initialisation massively
        if self._threshold != 0:
            self._update_count(value=len(self._list_dir()))

    @property
    def _file_count(self):
        # The current file count is itself persisted as a cache entry
        # under ``_fs_count_file``.
        return self.get(self._fs_count_file) or 0

    def _update_count(self, delta=None, value=None):
        # If we have no threshold, don't count files
        if self._threshold == 0:
            return

        if delta:
            new_count = self._file_count + delta
        else:
            new_count = value or 0
        self.set(self._fs_count_file, new_count, mgmt_element=True)

    def _normalize_timeout(self, timeout):
        # Convert a relative timeout to an absolute epoch deadline;
        # 0 is preserved and means "never expires".
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout != 0:
            timeout = time() + timeout
        return int(timeout)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        # In-flight temp files and the management (count) file are excluded.
        mgmt_files = [self._get_filename(name).split('/')[-1]
                      for name in (self._fs_count_file,)]
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)
                and fn not in mgmt_files]

    def _prune(self):
        # Nothing to do without a threshold, or while under it.
        if self._threshold == 0 or not self._file_count > self._threshold:
            return

        entries = self._list_dir()
        now = time()
        for idx, fname in enumerate(entries):
            try:
                remove = False
                with open(fname, 'rb') as f:
                    expires = pickle.load(f)
                    # Drop expired entries, plus every third scanned file as
                    # a crude way to shrink below the threshold even when
                    # nothing has expired yet.
                    remove = (expires != 0 and expires <= now) or idx % 3 == 0

                if remove:
                    os.remove(fname)
            except (IOError, OSError):
                # Best-effort pruning: a file that vanished or is unreadable
                # is simply skipped.
                pass
        self._update_count(value=len(self._list_dir()))

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                # Could not delete everything: re-sync the count and
                # report failure.
                self._update_count(value=len(self._list_dir()))
                return False
        self._update_count(value=0)
        return True

    def _get_filename(self, key):
        if isinstance(key, text_type):
            key = key.encode('utf-8')  # XXX unicode review
        # Keys are hashed so arbitrary strings map to safe file names.
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                # First pickle in the file is the expiry timestamp.
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return pickle.load(f)
                else:
                    # Entry expired: delete it eagerly.
                    os.remove(filename)
                    return None
        except (IOError, OSError, pickle.PickleError):
            return None

    def add(self, key, value, timeout=None):
        filename = self._get_filename(key)
        # NOTE: exists-then-set is not atomic; concurrent adds may race.
        if not os.path.exists(filename):
            return self.set(key, value, timeout)
        return False

    def set(self, key, value, timeout=None, mgmt_element=False):
        # Management elements have no timeout
        if mgmt_element:
            timeout = 0

        # Don't prune on management element update, to avoid loop
        else:
            self._prune()

        timeout = self._normalize_timeout(timeout)
        filename = self._get_filename(key)
        try:
            # Write to a temp file first, then rename into place so readers
            # never observe a half-written entry.
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            with os.fdopen(fd, 'wb') as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)

            os.rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=1)
            return True

    def delete(self, key, mgmt_element=False):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=-1)
            return True

    def has(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, 'rb') as f:
                # Only the expiry pickle needs to be read to answer ``has``.
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return True
                else:
                    # Expired: remove the stale file and report absence.
                    os.remove(filename)
                    return False
        except (IOError, OSError, pickle.PickleError):
            return False
|
||||
186
class/cachelib/memcached.py
Normal file
186
class/cachelib/memcached.py
Normal file
@@ -0,0 +1,186 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re
|
||||
from time import time
|
||||
from cachelib._compat import iteritems, to_native
|
||||
from cachelib.base import BaseCache, _items
|
||||
|
||||
|
||||
# Valid memcached keys: 1-250 chars containing no byte in 0x00-0x21
# (control characters, space, '!') and no 0xff.
_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
|
||||
|
||||
|
||||
class MemcachedCache(BaseCache):

    """A cache that uses memcached as backend.

    The first argument can either be an object that resembles the API of a
    :class:`memcache.Client` or a tuple/list of server addresses. In the
    event that a tuple/list is passed, Werkzeug tries to import the best
    available memcache library.

    This cache looks into the following packages/modules to find bindings for
    memcached:

        - ``pylibmc``
        - ``google.appengine.api.memcached``
        - ``memcached``
        - ``libmc``

    Implementation notes: This cache backend works around some limitations in
    memcached to simplify the interface. For example unicode keys are encoded
    to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
    the keys in the same format as passed. Furthermore all get methods
    silently ignore key errors to not cause problems when untrusted user data
    is passed to the get methods which is often the case in web applications.

    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys. This makes it
                       possible to use the same memcached server for different
                       applications. Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """

    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_native(key_prefix)

    def _normalize_key(self, key):
        # Memcached needs native-string keys; prepend the prefix if set.
        key = to_native(key, 'utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        return key

    def _normalize_timeout(self, timeout):
        # Positive timeouts are converted to an absolute epoch timestamp;
        # 0 ("never expires") is passed through unchanged.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = int(time()) + timeout
        return timeout

    def get(self, key):
        key = self._normalize_key(key)
        # memcached doesn't support keys longer than that. Because often
        # checks for so long keys can occur because it's tested from user
        # submitted data etc we fail silently for getting.
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if not isinstance(key, str):
                have_encoded_keys = True
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        _keys = list(key_mapping)
        d = rv = self._client.get_multi(_keys)
        # Translate the backend's encoded/prefixed keys back into the
        # caller's original keys.
        if have_encoded_keys or self.key_prefix:
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        # Fill in ``None`` for any key the backend did not return.
        if len(rv) < len(keys):
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)

    def set(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)

    def get_many(self, *keys):
        # Reuse get_dict so key translation/None-filling is shared.
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=None):
        new_mapping = {}
        for key, value in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value

        timeout = self._normalize_timeout(timeout)
        # set_multi returns the keys that failed; empty means full success.
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return not failed_keys

    def delete(self, key):
        key = self._normalize_key(key)
        # Invalid keys fail silently, mirroring get().
        if _test_memcached_key(key):
            return self._client.delete(key)

    def delete_many(self, *keys):
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)

    def has(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            # Appending an empty payload is used as a cheap existence
            # probe without transferring the stored value.
            return self._client.append(key, '')
        return False

    def clear(self):
        # NOTE(review): flushes the entire server, including keys with a
        # different prefix (see class docstring).
        return self._client.flush_all()

    def inc(self, key, delta=1):
        key = self._normalize_key(key)
        # Delegates to the client's server-side increment.
        return self._client.incr(key, delta)

    def dec(self, key, delta=1):
        key = self._normalize_key(key)
        # Delegates to the client's server-side decrement.
        return self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client. Used by the constructor."""
        # Preference order: pylibmc, App Engine memcache, python-memcached,
        # libmc. Returns None implicitly when none can be imported.
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)

        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()

        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)

        try:
            import libmc
        except ImportError:
            pass
        else:
            return libmc.Client(servers)
|
||||
154
class/cachelib/redis.py
Normal file
154
class/cachelib/redis.py
Normal file
@@ -0,0 +1,154 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import absolute_import
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError: # pragma: no cover
|
||||
import pickle
|
||||
|
||||
from cachelib.base import BaseCache, _items
|
||||
from cachelib._compat import string_types, integer_types
|
||||
|
||||
|
||||
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting address of the Redis
    server or an object resembling an instance of a redis.Redis class.

    Note: Python Redis API already takes care of encoding unicode strings on
    the fly.

    :param host: address of the Redis server or an object which API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which Redis server listens for connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on Redis Server to connect.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: A prefix that should be added to all keys.

    Any additional keyword arguments will be passed to ``redis.Redis``.
    """

    def __init__(self, host='localhost', port=6379, password=None,
                 db=0, default_timeout=300, key_prefix=None, **kwargs):
        BaseCache.__init__(self, default_timeout)
        if host is None:
            raise ValueError('RedisCache host parameter may not be None')
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError('no redis module found')
            if kwargs.get('decode_responses', None):
                # Stored values are raw bytes (pickles / ascii ints);
                # decoding responses would corrupt them.
                raise ValueError('decode_responses is not supported by '
                                 'RedisCache.')
            self._client = redis.Redis(host=host, port=port, password=password,
                                       db=db, **kwargs)
        else:
            # ``host`` is already an initialized client object.
            self._client = host
        self.key_prefix = key_prefix or ''

    def _normalize_timeout(self, timeout):
        # -1 is used internally as the "no expiry" marker; see set()
        # and set_many() which branch on it.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout == 0:
            timeout = -1
        return timeout

    def dump_object(self, value):
        """Dumps an object into a string for redis. By default it serializes
        integers as regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode('ascii')
        # The b'!' marker lets load_object distinguish pickled payloads
        # from plain integer strings.
        return b'!' + pickle.dumps(value)

    def load_object(self, value):
        """The reversal of :meth:`dump_object`. This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b'!'):
            try:
                return pickle.loads(value[1:])
            except pickle.PickleError:
                return None
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization. Still support that.
            return value

    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))

    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        # MGET fetches all values in one round trip.
        return [self.load_object(x) for x in self._client.mget(keys)]

    def set(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        if timeout == -1:
            # No expiry: plain SET.
            result = self._client.set(name=self.key_prefix + key,
                                      value=dump)
        else:
            # SETEX stores the value with a TTL in one command.
            result = self._client.setex(name=self.key_prefix + key,
                                        value=dump, time=timeout)
        return result

    def add(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        # SETNX only writes when the key is absent; EXPIRE then applies
        # the timeout (only reached when SETNX succeeded).
        return (
            self._client.setnx(name=self.key_prefix + key, value=dump) and
            self._client.expire(name=self.key_prefix + key, time=timeout)
        )

    def set_many(self, mapping, timeout=None):
        timeout = self._normalize_timeout(timeout)
        # Use transaction=False to batch without calling redis MULTI
        # which is not supported by twemproxy
        pipe = self._client.pipeline(transaction=False)

        for key, value in _items(mapping):
            dump = self.dump_object(value)
            if timeout == -1:
                pipe.set(name=self.key_prefix + key, value=dump)
            else:
                pipe.setex(name=self.key_prefix + key, value=dump,
                           time=timeout)
        return pipe.execute()

    def delete(self, key):
        return self._client.delete(self.key_prefix + key)

    def delete_many(self, *keys):
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return self._client.delete(*keys)

    def has(self, key):
        return self._client.exists(self.key_prefix + key)

    def clear(self):
        status = False
        if self.key_prefix:
            # Only remove keys under our prefix; KEYS scans the whole db.
            keys = self._client.keys(self.key_prefix + '*')
            if keys:
                status = self._client.delete(*keys)
        else:
            # No prefix configured: flush the entire database.
            status = self._client.flushdb()
        return status

    def inc(self, key, delta=1):
        return self._client.incr(name=self.key_prefix + key, amount=delta)

    def dec(self, key, delta=1):
        return self._client.decr(name=self.key_prefix + key, amount=delta)
|
||||
195
class/cachelib/session_simpile.py
Normal file
195
class/cachelib/session_simpile.py
Normal file
@@ -0,0 +1,195 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from time import time
|
||||
import os,struct
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError: # pragma: no cover
|
||||
import pickle
|
||||
|
||||
from cachelib.base import BaseCache
|
||||
import builtins
|
||||
|
||||
# Allow-list of builtins the restricted unpickler may resolve. Everything
# else (os.system, subprocess.*, ...) is rejected during unpickling.
safe_builtins = {
    'range',
    'complex',
    'set',
    'frozenset',
    'slice',
}


class RestrictedUnpickler(pickle.Unpickler):
    """Unpickler that only resolves globals listed in ``safe_builtins``."""

    def find_class(self, module, name):
        # Only whitelisted names from the builtins module may be resolved.
        if module == "builtins" and name in safe_builtins:
            return getattr(builtins, name)
        # BUGFIX: previously this returned None, which does not stop the
        # unpickling; raising is the documented way to forbid a global.
        raise pickle.UnpicklingError(
            'global %r.%r is forbidden' % (module, name))


def restricted_loads(s):
    """Deserialize ``s`` using the restricted unpickler.

    :param s: pickled bytes to vet and load.
    :returns: the unpickled object.
    :raises pickle.UnpicklingError: if the payload references any global
        outside ``safe_builtins``.

    BUGFIX: this previously returned ``True`` without unpickling at all
    (the real call was commented out), so the security check callers rely
    on in ``SimpleCacheSession.set``/``add`` was a no-op.
    """
    import io  # local import: io is not imported at module level here
    return RestrictedUnpickler(io.BytesIO(s)).load()
|
||||
|
||||
|
||||
|
||||
class SimpleCacheSession(BaseCache):
|
||||
|
||||
"""Simple memory cache for single process environments. This class exists
|
||||
mainly for the development server and is not 100% thread safe. It tries
|
||||
to use as many atomic operations as possible and no locks for simplicity
|
||||
but it could happen under heavy load that keys are added multiple times.
|
||||
|
||||
:param threshold: the maximum number of items the cache stores before
|
||||
it starts deleting some.
|
||||
:param default_timeout: the default timeout that is used if no timeout is
|
||||
specified on :meth:`~BaseCache.set`. A timeout of
|
||||
0 indicates that the cache never expires.
|
||||
"""
|
||||
__session_key = 'BT_:'
|
||||
__session_basedir = '/www/server/panel/data/session'
|
||||
|
||||
def __init__(self, threshold=500, default_timeout=300):
|
||||
BaseCache.__init__(self, default_timeout)
|
||||
self._cache = {}
|
||||
self.clear = self._cache.clear
|
||||
self._threshold = threshold
|
||||
|
||||
def _prune(self):
|
||||
if len(self._cache) > self._threshold:
|
||||
now = time()
|
||||
toremove = []
|
||||
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
|
||||
if (expires != 0 and expires <= now) or not os.path.exists(os.path.join(self.__session_basedir, self.md5(key))):
|
||||
toremove.append(key)
|
||||
for key in toremove:
|
||||
self._cache.pop(key, None)
|
||||
self.del_session_by_file(key)
|
||||
|
||||
|
||||
def _normalize_timeout(self, timeout):
|
||||
timeout = BaseCache._normalize_timeout(self, timeout)
|
||||
if timeout > 0:
|
||||
timeout = time() + timeout
|
||||
return timeout
|
||||
|
||||
def get_session_by_file(self,key):
|
||||
try:
|
||||
if key[:4] == self.__session_key:
|
||||
filename = '/'.join((self.__session_basedir,self.md5(key)))
|
||||
if not os.path.exists(filename): return None
|
||||
|
||||
with open(filename, 'rb') as fp:
|
||||
_val = fp.read()
|
||||
fp.close()
|
||||
expires = struct.unpack('f',_val[:4])[0]
|
||||
if expires == 0 or expires > time():
|
||||
value = _val[4:]
|
||||
|
||||
self._cache[key] = (expires,value)
|
||||
return pickle.loads(value)
|
||||
except :pass
|
||||
|
||||
def set_session_by_file(self,key,_val,expires):
|
||||
try:
|
||||
if key[:4] == self.__session_key:
|
||||
if not os.path.exists(self.__session_basedir): os.makedirs(self.__session_basedir,384)
|
||||
expires = struct.pack('f',expires)
|
||||
filename = '/'.join((self.__session_basedir,self.md5(key)))
|
||||
fp = open(filename, 'wb+')
|
||||
fp.write(expires + _val)
|
||||
fp.close()
|
||||
os.chmod(filename,384)
|
||||
except :pass
|
||||
|
||||
def del_session_by_file(self,key):
|
||||
try:
|
||||
if key[:4] == self.__session_key:
|
||||
filename = '/'.join((self.__session_basedir,self.md5(key)))
|
||||
if os.path.exists(filename): os.remove(filename)
|
||||
except : pass
|
||||
|
||||
def get(self, key):
|
||||
if not isinstance(key,str): return None
|
||||
try:
|
||||
expires, value = self._cache[key]
|
||||
if expires == 0 or expires > time():
|
||||
return pickle.loads(value)
|
||||
except (KeyError, pickle.PickleError):
|
||||
return self.get_session_by_file(key)
|
||||
|
||||
def set(self, key, value, timeout=None):
|
||||
|
||||
# 类型判断
|
||||
if not isinstance(key,str): return False
|
||||
type_list=(int,float,bool,str,list,dict,tuple,set,bytes)
|
||||
value_type=type(value)
|
||||
if value_type not in type_list:
|
||||
return False
|
||||
|
||||
# 过期清理
|
||||
expires = self._normalize_timeout(timeout)
|
||||
self._prune()
|
||||
try:
|
||||
restricted_loads(pickle.dumps(value))
|
||||
except:
|
||||
return False
|
||||
|
||||
# 转换
|
||||
_val = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
|
||||
self._cache[key] = (expires,_val)
|
||||
self.set_session_by_file(key,_val,expires)
|
||||
return True
|
||||
|
||||
def add(self, key, value, timeout=None):
    """Store *value* only when *key* is not already cached.

    Same validation rules as set(); returns True when the entry was
    inserted, False when rejected or already present.
    """
    # Type validation
    if not isinstance(key, str):
        return False
    allowed = (int, float, bool, str, list, dict, tuple, set, bytes)
    if type(value) not in allowed:
        return False

    expires = self._normalize_timeout(timeout)
    self._prune()
    # Probe that the value survives pickling before committing it.
    try:
        restricted_loads(pickle.dumps(value))
    except:
        return False
    item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
    if key in self._cache:
        return False
    self._cache.setdefault(key, item)
    self.set_session_by_file(key, item[1], expires)
    return True
|
||||
|
||||
def delete(self, key):
    """Drop *key* from memory and disk; True if a memory entry existed."""
    existed = self._cache.pop(key, None) is not None
    self.del_session_by_file(key)
    return existed
|
||||
|
||||
def has(self, key):
    """Return True when *key* is cached (memory or file) and still fresh."""
    entry = self._cache.get(key)
    if entry is not None:
        expires = entry[0]
        return expires == 0 or expires > time()
    # Memory miss: a truthy on-disk session counts as present.
    return bool(self.get_session_by_file(key))
|
||||
|
||||
|
||||
def get_expire_time(self, key):
    """Return the absolute expiry timestamp for *key*, or 0 when absent."""
    entry = self._cache.get(key)
    return entry[0] if entry is not None else 0
|
||||
|
||||
def md5(self, strings):
    """Return the 32-character hex MD5 digest of *strings* (UTF-8 encoded)."""
    import hashlib
    return hashlib.md5(strings.encode('utf-8')).hexdigest()
|
||||
|
||||
347
class/cachelib/simple.py
Normal file
347
class/cachelib/simple.py
Normal file
@@ -0,0 +1,347 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from time import time
|
||||
import os,struct
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError: # pragma: no cover
|
||||
import pickle
|
||||
|
||||
from cachelib.base import BaseCache
|
||||
import io
|
||||
import builtins
|
||||
|
||||
# Builtin names that RestrictedUnpickler may resolve while unpickling;
# any other global reference is rejected.
safe_builtins = {
    'range',
    'complex',
    'set',
    'frozenset',
    'slice',
}


class RestrictedUnpickler(pickle.Unpickler):
    """Unpickler that only resolves a small whitelist of builtins.

    Follows the "restricting globals" recipe from the Python pickle
    documentation: any global reference outside ``safe_builtins`` aborts
    the load with :exc:`pickle.UnpicklingError`.
    """

    def find_class(self, module, name):
        if module == "builtins" and name in safe_builtins:
            return getattr(builtins, name)
        # Fix: raise instead of returning None.  Returning None made a
        # forbidden global silently unpickle as None (or fail later with a
        # confusing TypeError at REDUCE time), masking the whitelist.
        raise pickle.UnpicklingError(
            "global '%s.%s' is forbidden" % (module, name))
|
||||
|
||||
def restricted_loads(s):
    """Validation hook for pickled payloads (currently a no-op).

    NOTE(review): the restricted-unpickler check is disabled -- the real
    call is commented out below and every payload is accepted as-is, so
    callers get no protection from this function.  Confirm the callers'
    expectations before re-enabling the commented line.
    """
    # return RestrictedUnpickler(io.BytesIO(s)).load()
    return True
|
||||
|
||||
|
||||
|
||||
class SimpleCache(BaseCache):

    """Simple memory cache for single process environments. This class exists
    mainly for the development server and is not 100% thread safe. It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.

    Entries whose key starts with ``BT_:`` are additionally mirrored to disk
    under ``/www/server/panel/data/session``; entries whose key starts with
    ``SHM_:`` live only as files under ``/dev/shm/aap-shm``.

    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    """
    # 4-char key prefix marking entries that are mirrored to session files.
    __session_key = 'BT_:'
    # Directory holding the on-disk session copies.
    __session_basedir = '/www/server/panel/data/session'

    # 5-char key prefix for entries stored only in shared memory (tmpfs).
    __SHM_PREFIX = 'SHM_:'
    __SHM_BASEDIR = '/dev/shm/aap-shm'

    def __init__(self, threshold=500, default_timeout=300):
        BaseCache.__init__(self, default_timeout)
        self._cache = {}
        # Expose dict.clear directly as this cache's clear() operation.
        self.clear = self._cache.clear
        self._threshold = threshold

    def _prune(self):
        """Evict entries once the cache grows past the threshold.

        Removes every expired entry plus (by iteration order) every third
        entry, so the cache shrinks even when nothing has expired.
        """
        if len(self._cache) > self._threshold:
            now = time()
            toremove = []
            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
                if (expires != 0 and expires <= now) or idx % 3 == 0:
                    toremove.append(key)
            for key in toremove:
                self._cache.pop(key, None)
                # Keep the on-disk session copy in sync with memory.
                self.del_session_by_file(key)

    def _normalize_timeout(self, timeout):
        # Convert a relative timeout (seconds) into an absolute timestamp;
        # 0 ("never expires") is passed through unchanged.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = time() + timeout
        return timeout

    def get_session_by_file(self, key):
        """Load a session entry from disk and repopulate the memory cache.

        File layout: 4-byte float32 expiry timestamp + pickled payload.
        Returns the unpickled value, or None on miss/expiry/any error.
        """
        try:
            if key[:4] == self.__session_key:
                filename = '/'.join((self.__session_basedir, self.md5(key)))
                if not os.path.exists(filename): return None

                with open(filename, 'rb') as fp:
                    _val = fp.read()
                    fp.close()  # redundant inside the with-block; kept as-is
                # NOTE(review): float32 loses ~2 minutes of precision on
                # current epoch values; matches the writer's format.
                expires = struct.unpack('f', _val[:4])[0]
                if expires == 0 or expires > time():
                    value = _val[4:]

                    self._cache[key] = (expires, value)
                    # pickle.loads on file content -- trusted local data only.
                    return pickle.loads(value)
        except:
            pass

    def set_session_by_file(self, key, _val, expires):
        """Persist an already-pickled session value to disk (best effort)."""
        try:
            if key[:4] == self.__session_key:
                # NOTE(review): 384 == 0o600 on a directory lacks the execute
                # bit; presumably fine because the panel runs as root -- confirm.
                if not os.path.exists(self.__session_basedir): os.makedirs(self.__session_basedir, 384)
                expires = struct.pack('f', expires)
                filename = '/'.join((self.__session_basedir, self.md5(key)))
                fp = open(filename, 'wb+')
                fp.write(expires + _val)
                fp.close()
                os.chmod(filename, 384)  # 0o600: owner read/write only
        except:
            pass

    def del_session_by_file(self, key):
        """Remove the on-disk session copy of *key*, ignoring any error."""
        try:
            if key[:4] == self.__session_key:
                filename = '/'.join((self.__session_basedir, self.md5(key)))
                if os.path.exists(filename): os.remove(filename)
        except:
            pass

    def get(self, key):
        """Fetch a value: shm first, then memory, then the session file."""
        if not isinstance(key, str): return None

        try:
            # Prefer the shared-memory copy for SHM_: keys.
            _shm_val = self.__get_shm(key)

            if _shm_val is not None:
                return _shm_val
        except: pass

        try:
            expires, value = self._cache[key]
            if expires == 0 or expires > time():
                return pickle.loads(value)
        except (KeyError, pickle.PickleError):
            # Memory miss or corrupt payload: fall back to the file store.
            return self.get_session_by_file(key)

    def set(self, key, value, timeout=None):
        """Store *value*; returns True on success, False when rejected."""

        # Type validation: str keys, whitelisted primitive value types only.
        if not isinstance(key, str): return False
        type_list = (int, float, bool, str, list, dict, tuple, set, bytes)
        value_type = type(value)
        if value_type not in type_list:
            return False

        try:
            # SHM_: keys are written straight to shared memory.
            if self.__set_shm(key, value, timeout):
                return True
        except: pass

        # Expiry housekeeping
        expires = self._normalize_timeout(timeout)
        self._prune()
        try:
            # Probe the pickle round-trip (restricted_loads is currently a
            # no-op -- see its definition).
            restricted_loads(pickle.dumps(value))
        except:
            return False

        # Serialize and store, then mirror to disk.
        _val = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        self._cache[key] = (expires, _val)
        self.set_session_by_file(key, _val, expires)
        return True

    def add(self, key, value, timeout=None):
        """Store *value* only when *key* is absent; True when inserted."""

        # Type validation (same rules as set()).
        if not isinstance(key, str): return False
        type_list = (int, float, bool, str, list, dict, tuple, set, bytes)
        value_type = type(value)
        if value_type not in type_list:
            return False

        try:
            # SHM_: keys go to shared memory when not already present there.
            if self.__add_shm(key, value, timeout):
                return True
        except: pass

        expires = self._normalize_timeout(timeout)
        self._prune()
        try:
            restricted_loads(pickle.dumps(value))
        except:
            return False
        item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        if key in self._cache:
            return False
        self._cache.setdefault(key, item)
        self.set_session_by_file(key, item[1], expires)
        return True

    def delete(self, key):
        """Drop *key* everywhere; True when something was removed."""
        try:
            # SHM_: keys are handled entirely by the shm store.
            if self.__del_shm(key):
                return True
        except: pass

        result = self._cache.pop(key, None) is not None
        self.del_session_by_file(key)
        return result

    def has(self, key):
        """True when *key* exists (shm, memory, or file) and is still fresh."""
        try:
            if self.__has_shm(key):
                return True
        except: pass

        try:
            expires, value = self._cache[key]
            return expires == 0 or expires > time()
        except KeyError:
            # Memory miss: a truthy on-disk session counts as present.
            if self.get_session_by_file(key): return True
            return False

    def get_expire_time(self, key):
        """Absolute expiry timestamp of the in-memory entry (0 when absent)."""
        try:
            expires, value = self._cache[key]
            return expires
        except KeyError:
            return 0

    def md5(self, strings):
        """Return the 32-character hex MD5 digest of *strings* (UTF-8)."""
        import hashlib
        m = hashlib.md5()

        m.update(strings.encode('utf-8'))
        return m.hexdigest()

    def __set_shm(self, key, value, timeout=None):
        '''
        @name Try to write a cache entry into the shm directory
        @author Zhj<2022-10-08>
        @param key<string> key name (must start with the SHM_: prefix)
        @param value<mixed> value
        @param timeout<int> time to live in seconds
        @return bool
        '''
        if key[:5] != self.__SHM_PREFIX:
            return False

        self.__makesure_shm_basedir()

        # Same on-disk layout as session files: float32 expiry + pickle.
        expires = struct.pack('f', self._normalize_timeout(timeout))
        filename = '/'.join((self.__SHM_BASEDIR, self.md5(key)))
        with open(filename, 'wb') as fp:
            fp.write(expires + pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        os.chmod(filename, 384)  # 0o600: owner read/write only

        return True

    def __get_shm(self, key):
        '''
        @name Try to read a cache entry from the shm directory
        @author Zhj<2022-10-08>
        @param key<string> key name
        @return mixed|None
        '''
        if key[:5] != self.__SHM_PREFIX:
            return None

        self.__makesure_shm_basedir()

        filename = '/'.join((self.__SHM_BASEDIR, self.md5(key)))
        if not os.path.exists(filename): return None

        with open(filename, 'rb') as fp:
            _val = fp.read()

        expires = struct.unpack('f', _val[:4])[0]

        # Expired: delete the cache file.
        if expires > 0 and expires <= time():
            os.remove(filename)
            return None

        return pickle.loads(_val[4:])

    def __del_shm(self, key):
        '''
        @name Delete a cache entry from the shm directory
        @author Zhj<2022-10-08>
        @param key<string> key name
        @return bool
        '''
        if key[:5] != self.__SHM_PREFIX:
            return False

        self.__makesure_shm_basedir()

        filename = '/'.join((self.__SHM_BASEDIR, self.md5(key)))
        if os.path.exists(filename):
            os.remove(filename)

        return True

    def __has_shm(self, key):
        '''
        @name Check whether a cache entry exists in the shm directory
        @author Zhj<2022-10-08>
        @param key<string> key name
        @return bool
        '''
        if key[:5] != self.__SHM_PREFIX:
            return False

        self.__makesure_shm_basedir()

        filename = '/'.join((self.__SHM_BASEDIR, self.md5(key)))
        if not os.path.exists(filename): return False

        # Read only the 4-byte expiry header.
        with open(filename, 'rb') as fp:
            expires = struct.unpack('f', fp.read(4))[0]

        # Expired: delete the cache file.
        if expires > 0 and expires <= time():
            os.remove(filename)
            return False

        return True

    def __add_shm(self, key, value, timeout=None):
        '''
        @name Try to add a cache entry to the shm directory
        @author Zhj<2022-10-08>
        @param key<string> key name
        @param value<mixed> value
        @param timeout<int> time to live in seconds
        @return bool
        '''
        if self.__has_shm(key):
            return False

        return self.__set_shm(key, value, timeout)

    def __makesure_shm_basedir(self):
        '''
        @name Ensure the cache directory under shm exists
        @author Zhj<2022-10-08>
        @return void
        '''
        if not os.path.exists(self.__SHM_BASEDIR):
            os.makedirs(self.__SHM_BASEDIR, 384)
|
||||
|
||||
64
class/cachelib/uwsgi.py
Normal file
64
class/cachelib/uwsgi.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import platform
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError: # pragma: no cover
|
||||
import pickle
|
||||
|
||||
from cachelib.base import BaseCache
|
||||
|
||||
|
||||
class UWSGICache(BaseCache):
    """Cache backend that delegates to uWSGI's caching framework.

    .. note::
        This class cannot be used when running under PyPy, because the uWSGI
        API implementation for PyPy is lacking the needed functionality.

    :param default_timeout: The default timeout in seconds.
    :param cache: The name of the caching instance to connect to, for
        example: mycache@localhost:3031, defaults to an empty string, which
        means uWSGI will cache in the local instance. If the cache is in the
        same instance as the werkzeug app, you only have to provide the name of
        the cache.
    """

    def __init__(self, default_timeout=300, cache=''):
        BaseCache.__init__(self, default_timeout)

        # The uWSGI cache API is incomplete under PyPy; refuse early.
        if platform.python_implementation() == 'PyPy':
            raise RuntimeError(
                "uWSGI caching does not work under PyPy, see "
                "the docs for more details.")

        # The uwsgi module only exists inside a running uWSGI server.
        try:
            import uwsgi
        except ImportError:
            raise RuntimeError(
                "uWSGI could not be imported, are you "
                "running under uWSGI?")
        self._uwsgi = uwsgi

        self.cache = cache

    def get(self, key):
        """Return the unpickled value for *key*, or None on a miss."""
        raw = self._uwsgi.cache_get(key, self.cache)
        return pickle.loads(raw) if raw is not None else None

    def delete(self, key):
        """Remove *key* from the uWSGI cache."""
        return self._uwsgi.cache_del(key, self.cache)

    def set(self, key, value, timeout=None):
        """Store *value* via uwsgi.cache_update (overwrites existing keys)."""
        ttl = self._normalize_timeout(timeout)
        return self._uwsgi.cache_update(key, pickle.dumps(value), ttl,
                                        self.cache)

    def add(self, key, value, timeout=None):
        """Store *value* via uwsgi.cache_set (only when *key* is absent)."""
        ttl = self._normalize_timeout(timeout)
        return self._uwsgi.cache_set(key, pickle.dumps(value), ttl,
                                     self.cache)

    def clear(self):
        """Flush every entry of the configured cache instance."""
        return self._uwsgi.cache_clear(self.cache)

    def has(self, key):
        """Return True when *key* exists in the uWSGI cache."""
        return self._uwsgi.cache_exists(key, self.cache) is not None
|
||||
Reference in New Issue
Block a user