#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (C) 2013-2016 Étienne Loks # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # See the file COPYING for details. from cairosvg import svg2png from csv import QUOTE_ALL import datetime from functools import wraps from itertools import chain from inspect import currentframe, getframeinfo import hashlib from importlib import import_module import io from jinja2 import Template import os import random import re import requests from secretary import Renderer as MainSecretaryRenderer, UndefinedSilently import shutil import six import subprocess import sys import tempfile import time import uuid import xmltodict import zipfile from django import forms from django.apps import apps from django.conf import settings from django.conf.urls import url from django.contrib.contenttypes.models import ContentType from django.contrib.gis.geos import GEOSGeometry from django.contrib.sessions.backends.db import SessionStore from django.core.cache import cache from django.core.exceptions import SuspiciousOperation, ObjectDoesNotExist from django.core.files import File from django.core.validators import EMPTY_VALUES from django.core.urlresolvers import reverse from django.db import models from django.http import HttpResponseRedirect from django.utils.datastructures import MultiValueDict as BaseMultiValueDict from django.utils.safestring import mark_safe from django.template.defaultfilters import slugify if settings.USE_TRANSLATION_OVERLOAD: from overload_translation.utils import ( ugettext_lazy, ugettext, pgettext_lazy, pgettext, ) else: from django.utils.translation import ( ugettext_lazy, ugettext, pgettext_lazy, pgettext, ) _ = ugettext_lazy def dict_to_tuple(dct): values = [] for k, v in dct.items(): if isinstance(v, dict): v = dict_to_tuple(v) values.append((k, v)) return tuple(values) def debug_line_no(): return currentframe().f_back.f_lineno def fake_task(*args): def fake(func): return func return fake task = fake_task celery_app = None if settings.USE_BACKGROUND_TASK: try: from celery import shared_task task = shared_task celery_app = getattr( import_module(settings.ROOT_PATH.split("/")[-2] + ".celery_app"), "app" ) except ModuleNotFoundError: pass class BColors: """ Bash colors. Don't forget to finish your colored string with ENDC. """ HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" class Round(models.Func): function = "ROUND" arity = 2 arg_joiner = "::numeric, " CSV_OPTIONS = {"delimiter": ",", "quotechar": '"', "quoting": QUOTE_ALL} def is_safe_path(basedir, path, follow_symlinks=True): # resolves symbolic links if follow_symlinks: return os.path.realpath(path).startswith(basedir) return os.path.abspath(path).startswith(basedir) def import_class(full_path_classname): """ Return the model class from the full path """ mods = full_path_classname.split(".") if len(mods) == 1: mods = ["ishtar_common", "models", mods[0]] elif ( "models" not in mods and "models_finds" not in mods and "models_treatments" not in mods ): raise SuspiciousOperation("Try to import a non model from a string") module = import_module(".".join(mods[:-1])) model = getattr(module, mods[-1]) if not issubclass(model, models.Model): raise SuspiciousOperation("Try to import a non model from a string") return model def check_rights(rights=None, redirect_url="/"): """ Decorator that checks the rights to access the view. """ def decorator(view_func): def _wrapped_view(request, *args, **kwargs): if not rights: return view_func(request, *args, **kwargs) if hasattr(request.user, "ishtaruser"): if request.user.ishtaruser.has_right("administrator", request.session): kwargs["current_right"] = "administrator" return view_func(request, *args, **kwargs) for right in rights: # be careful to put the more permissive rights first # if granted it can allow more if request.user.ishtaruser.has_right(right, request.session): kwargs["current_right"] = right return view_func(request, *args, **kwargs) put_session_message( request.session.session_key, _("You don't have sufficient permissions to do this action."), "warning", ) return HttpResponseRedirect(redirect_url) return _wrapped_view return decorator def check_rights_condition(rights): """ To be used to check in wizard condition_dict """ def func(self): request = self.request if request.user.ishtaruser.has_right("administrator", request.session): return True for right in rights: if request.user.ishtaruser.has_right(right, request.session): return True return False return func def check_model_access_control(request, model, available_perms=None): """ Check access control to a model for a specific request :param request: the current request :param model: the concerned model :param available_perms: specific permissions to check if not specified "view" and "view_own" will be checked :return: (allowed, own) tuple """ own = True # more restrictive by default allowed = False if not request.user.is_authenticated(): return allowed, own if not available_perms: available_perms = [ "view_" + model.__name__.lower(), "view_own_" + model.__name__.lower(), ] try: ishtaruser = request.user.ishtaruser except request.user._meta.model.ishtaruser.RelatedObjectDoesNotExist: return False, True if ishtaruser.has_right("administrator", session=request.session): allowed = True own = False return allowed, own for perm, lbl in model._meta.permissions: if perm not in available_perms: continue if ishtaruser.person.has_right(perm, session=request.session): allowed = True if "_own_" not in perm: own = False break # max right reach return allowed, own def update_data(data_1, data_2, merge=False): """ Update a data directory taking account of key detail """ res = {} if not isinstance(data_1, dict) or not isinstance(data_2, dict): if data_2 and not data_1: return data_2 if not merge: return data_1 if data_2 and data_2 != data_1: return data_1 + " ; " + data_2 return data_1 for k in data_1: if k not in data_2: res[k] = data_1[k] else: res[k] = update_data(data_1[k], data_2[k], merge=merge) for k in data_2: if k not in data_1: res[k] = data_2[k] return res def move_dict_data(data, key1, key2): """ Move key1 value to key2 value in a data dict :param data: data dict (with subdicts) :param key1: key to move (with __ notation for hierarchy - begining with "data__") :param key2: target key (with __ notation for hierarchy - begining with "data__") :return: result data """ keys1 = key1.split("__") keys2 = key2.split("__") value = data for idx, key in enumerate(keys1): if not idx: if key != "data": return data continue if key not in value: return data if idx == (len(keys1) - 1): # last value = value.pop(key) # remove from data else: value = value[key] new_value = data for idx, key in enumerate(keys2): if not idx: if key != "data": return data continue if idx == (len(keys2) - 1): # last new_value[key] = value else: if key not in new_value: new_value[key] = {} new_value = new_value[key] return data def clean_empty_data(data): """ Clean empty branches of a data dict """ for key in data.keys(): if data[key] in [{}, None, ""]: data.pop(key) continue if isinstance(data[key], dict): data[key] = clean_empty_data(data[key]) return data class MultiValueDict(BaseMultiValueDict): def get(self, *args, **kwargs): v = super(MultiValueDict, self).getlist(*args, **kwargs) if callable(v): v = v() if type(v) in (list, tuple) and len(v) > 1: v = ",".join(v) elif type(v) not in (int, str): v = super(MultiValueDict, self).get(*args, **kwargs) return v def getlist(self, *args, **kwargs): lst = super(MultiValueDict, self).getlist(*args, **kwargs) if type(lst) not in (tuple, list): lst = [lst] return lst def is_downloadable(url): """ Does the url contain a downloadable resource """ h = requests.head(url, allow_redirects=True) header = h.headers content_type = header.get("content-type") if "text" in content_type.lower(): return False if "html" in content_type.lower(): return False return True def get_current_year(): return datetime.datetime.now().year def get_cache(cls, extra_args=tuple(), app_label=None): if not app_label: app_label = cls._meta.app_label cache_key = "{}-{}-{}".format(settings.PROJECT_SLUG, app_label, cls.__name__) for arg in extra_args: if not arg: cache_key += "-0" else: if type(arg) == dict: cache_key += "-" + "_".join([str(arg[k]) for k in arg]) elif type(arg) in (list, tuple): cache_key += "-" + "_".join([str(v) for v in arg]) else: cache_key += "-" + str(arg) cache_key = slugify(cache_key) if not cache_key.endswith("_current_keys") and hasattr( cls, "_add_cache_key_to_refresh" ): cls._add_cache_key_to_refresh(extra_args) if len(cache_key) >= 250: m = hashlib.md5() m.update(cache_key) cache_key = m.hexdigest() return cache_key, cache.get(cache_key) def force_cached_label_changed(sender, **kwargs): if not kwargs.get("instance"): return kwargs["instance"]._cached_label_checked = False cached_label_changed(sender, **kwargs) class SecretaryRenderer(MainSecretaryRenderer): def _pack_document(self, files): """ Overload _pack_document: obsolete files can be referenced - continue on null content for files """ self.log.debug("packing document") zip_file = io.BytesIO() zipdoc = zipfile.ZipFile(zip_file, "a") for fname, content in files.items(): if isinstance(content, UndefinedSilently): continue if sys.version_info >= (2, 7): zipdoc.writestr(fname, content, zipfile.ZIP_DEFLATED) else: zipdoc.writestr(fname, content) self.log.debug("Document packing completed") return zip_file def serialize_args_for_tasks(sender, instance, kwargs, extra_kwargs=None): if "instance" in kwargs: kwargs["instance"] = kwargs["instance"].pk sender = (sender._meta.app_label, sender._meta.object_name) if extra_kwargs: for kw in extra_kwargs: if getattr(instance, kw, None): kwargs[kw] = getattr(instance, kw) for k in list(kwargs.keys()): if k in ["model", "signal", "_cached_labels_bulk_update"] or kwargs[k] is None: kwargs.pop(k) continue if isinstance(kwargs[k], set): kwargs[k] = list(kwargs[k]) return sender, kwargs def deserialize_args_for_tasks(sender, kwargs, extra_kwargs=None): if "instance" not in kwargs: return sender, None if not isinstance(sender, (tuple, list)): # not task return sender, kwargs["instance"] sender = apps.get_model(*sender) instance = None retried = 0 # object can be in cache of another thread but not yet commited # waiting for it while not instance and retried < 6: if retried: time.sleep(0.5) if sender.objects.filter(pk=kwargs["instance"]).count(): instance = sender.objects.get(pk=kwargs["instance"]) else: retried += 1 if not instance: return sender, None # object is not here anymore if extra_kwargs: for kw in extra_kwargs: if kw in kwargs: setattr(instance, kw, kwargs[kw]) return sender, instance EXTRA_KWARGS_TRIGGER = [ "_cascade_change", "_cached_labels_bulk_update", "skip_history_when_saving", "_post_saved_geo", "_search_updated", "_cached_label_checked", ] def cached_label_and_geo_changed(sender, **kwargs): instance = kwargs["instance"] if getattr(instance, "_no_post_save", False): return cached_label_changed(sender=sender, **kwargs) post_save_geo(sender=sender, **kwargs) def revoke_old_task(kwargs, action_name, task_id, instance_cls): kwargs["action"] = action_name key, old_task_id = get_cache( instance_cls, tuple(f"{k}:{v}" for k, v in kwargs.items()) ) if old_task_id: try: celery_app.control.revoke(old_task_id) except ConnectionResetError: # task already revoked or done pass cache.set(key, task_id, settings.CACHE_TIMEOUT * 4) def cached_label_changed(sender, **kwargs): if not kwargs.get("instance"): return instance = kwargs.get("instance") if hasattr(instance, "test_obj"): instance.test_obj.reached(sender, **kwargs) # cache_key, value = get_cache( # sender, ["cached_label_changed", kwargs['instance'].pk] # ) # if value and not settings.DISABLE_TASK_TIMEOUT: # # multiple request too quick # return # cache.set(cache_key, True, settings.CACHE_TASK_TIMEOUT) if ( not settings.USE_BACKGROUND_TASK or not instance.pk or not sender.objects.filter(pk=instance.pk).count() ): # no background task or not yet fully saved return _cached_label_changed(sender, **kwargs) if getattr(instance, "_cascade_change", False): kwargs["cascade_change"] = True sender, kwargs = serialize_args_for_tasks( sender, instance, kwargs, EXTRA_KWARGS_TRIGGER ) task_item = _cached_label_changed.delay(sender, **kwargs) revoke_old_task(kwargs, "cached_label_changed", task_item.id, instance.__class__) return task_item @task() def _cached_label_changed(sender, **kwargs): sender, instance = deserialize_args_for_tasks(sender, kwargs, EXTRA_KWARGS_TRIGGER) if not instance: return force_update = kwargs.get("force_update", False) if hasattr(instance, "need_update") and instance.need_update: force_update = True instance.skip_history_when_saving = True if not force_update and getattr(instance, "_cached_label_checked", False): return if hasattr(instance, "refresh_cache"): instance.refresh_cache() instance._cached_label_checked = True cached_labels = ["cached_label"] if hasattr(instance, "CACHED_LABELS"): cached_labels = instance.CACHED_LABELS changed = [] for cached_label in cached_labels: gen_func = "_generate_" + cached_label if not hasattr(instance, gen_func): continue lbl = getattr(instance, gen_func)() if lbl != getattr(instance, cached_label): changed.append((cached_label, lbl)) setattr(instance, cached_label, lbl) # update for cache if hasattr(instance, "need_update") and instance.need_update: changed.append(("need_update", False)) instance.need_update = False if changed: instance._search_updated = False if hasattr(instance, "_cascade_change") and instance._cascade_change: instance.skip_history_when_saving = True instance.__class__.objects.filter(pk=instance.pk).update(**dict(changed)) if (changed or not cached_labels) and hasattr(instance, "cascade_update"): instance.cascade_update() updated = False if force_update or hasattr(instance, "update_search_vector"): updated = instance.update_search_vector() if hasattr(instance, "_cached_labels_bulk_update"): updated = instance._cached_labels_bulk_update() or updated if not updated and hasattr(instance, "_get_associated_cached_labels"): for item in instance._get_associated_cached_labels(): item._cascade_change = True if hasattr(instance, "test_obj"): item.test_obj = instance.test_obj cached_label_changed(item.__class__, instance=item) cache_key, __ = get_cache(sender, ["cached_label_changed", instance.pk]) cache.set(cache_key, None, settings.CACHE_TASK_TIMEOUT) if cached_labels: return getattr(instance, cached_labels[0], "") return "" def regenerate_all_cached_labels(model): """ When the rule for generating cached label change. Regeneration of all label has to be done. :param model: model class concerned """ for item in model.all(): item.skip_history_when_saving = True cached_label_changed(model, instance=item) def shortify(lbl, number=20): SHORTIFY_STR = ugettext(" (...)") if not lbl: lbl = "" if len(lbl) <= number: return lbl return lbl[: number - len(SHORTIFY_STR)] + SHORTIFY_STR def mode(array): most = max(list(map(array.count, array))) return list(set(filter(lambda x: array.count(x) == most, array))) def disable_for_loaddata(signal_handler): """ Decorator that turns off signal handlers when loading fixture data. """ @wraps(signal_handler) def wrapper(*args, **kwargs): if kwargs.get("raw"): return signal_handler(*args, **kwargs) return wrapper def _get_image_link(doc): from ishtar_common.models import IshtarSiteProfile # manage missing images if not doc.thumbnail or not doc.thumbnail.url or not doc.image or not doc.image.url: return "" item = None for related_key in doc.__class__.RELATED_MODELS: q = getattr(doc, related_key) if q.count(): item = q.all()[0] break if not item: # image attached to nothing... return "" item_class_name = str(item.__class__._meta.verbose_name) if item.__class__.__name__ == "ArchaeologicalSite": item_class_name = str(IshtarSiteProfile.get_default_site_label()) return mark_safe( """

{}

{}

{}
""".format( doc.image.url, doc.thumbnail.url, item_class_name, str(item), reverse(item.SHOW_URL, args=[item.pk, ""]), str(_("Information")), str(_("Load another random image?")), ) ) def get_random_item_image_link(request): from ishtar_common.models import Document if not hasattr(request.user, "ishtaruser"): return "" ishtar_user = request.user.ishtaruser if not ishtar_user.has_right( "ishtar_common.view_document", session=request.session ): return "" q = ( Document.objects.filter(thumbnail__isnull=False, image__isnull=False) .exclude(thumbnail="") .exclude(image="") ) total = q.count() if not total: return "" image_nb = random.randint(0, total - 1) return _get_image_link(q.all()[image_nb]) def convert_coordinates_to_point(x, y, z=None, srid=4326): if z: geom = GEOSGeometry("POINT({} {} {})".format(x, y, z), srid=srid) else: geom = GEOSGeometry("POINT({} {})".format(x, y), srid=srid) if not geom.valid: raise forms.ValidationError(geom.valid_reason) return geom def get_srid_obj_from_point(point): from ishtar_common.models import SpatialReferenceSystem try: return SpatialReferenceSystem.objects.get(srid=int(point.srid)) except SpatialReferenceSystem.DoesNotExist: return SpatialReferenceSystem.objects.create( srid=int(point.srid), auth_name="EPSG", label="EPSG-{}".format(point.srid), txt_idx="epsg-{}".format(point.srid), ) def post_save_geo(sender, **kwargs): """ Convert raw x, y, z point to real geo field """ if not kwargs.get("instance"): return # cache_key, value = get_cache( # sender, ["post_save_geo", kwargs['instance'].pk]) # if value and not settings.DISABLE_TASK_TIMEOUT: # # multiple request too quick # return # cache.set(cache_key, True, settings.CACHE_TASK_TIMEOUT) instance = kwargs.get("instance") if hasattr(instance, "_no_geo_check") and instance._no_geo_check: return if not settings.USE_BACKGROUND_TASK: return _post_save_geo(sender, **kwargs) sender, kwargs = serialize_args_for_tasks( sender, instance, kwargs, EXTRA_KWARGS_TRIGGER ) task_item = _post_save_geo.delay(sender, **kwargs) revoke_old_task(kwargs, "post_save_geo", task_item.id, instance.__class__) return task_item @task() def _post_save_geo(sender, **kwargs): """ Convert raw x, y, z point to real geo field """ profile = get_current_profile() if not profile.mapping: return sender, instance = deserialize_args_for_tasks(sender, kwargs, EXTRA_KWARGS_TRIGGER) if not instance: return kls_name = instance.__class__.__name__ if not profile.locate_warehouses and ( "Container" in kls_name or "Warehouse" in kls_name ): return if getattr(instance, "_post_saved_geo", False): return # print(sender, "post_save_geo") current_source = "default" if hasattr(instance.__class__, "_meta"): current_source = str(instance.__class__._meta.verbose_name) modified = False if hasattr(instance, "multi_polygon") and not getattr( instance, "DISABLE_POLYGONS", False ): if ( instance.multi_polygon_source_item and instance.multi_polygon_source_item != current_source ): # refetch instance.multi_polygon = None instance.multi_polygon_source = None modified = True if instance.multi_polygon and not instance.multi_polygon_source: # should be a db source instance.multi_polygon_source = "P" instance.multi_polygon_source_item = current_source elif instance.multi_polygon_source != "P": precise_poly = instance.get_precise_polygons() if precise_poly: poly, source_item = precise_poly instance.multi_polygon = poly instance.multi_polygon_source = "P" instance.multi_polygon_source_item = source_item modified = True elif profile.use_town_for_geo: poly = instance.get_town_polygons() if poly: poly, poly_source = poly if poly != instance.multi_polygon: instance.multi_polygon_source_item = poly_source instance.multi_polygon_source = "T" # town try: instance.multi_polygon = poly modified = True except TypeError: print(instance, instance.pk) if ( instance.point_source_item and instance.point_source_item != current_source ) or ( instance.point_source == "M" ): # refetch csrs = instance.spatial_reference_system if instance.x and instance.y: new_point = GEOSGeometry( "POINT({} {})".format(instance.x, instance.y), srid=csrs.srid ) if instance.point_2d: proj_point = instance.point_2d.transform(csrs.srid, clone=True) if new_point.distance(proj_point) < 0.01: instance.x, instance.y = None, None instance.point, instance.point_2d = None, None instance.point_source = None point = instance.point point_2d = instance.point_2d if ( (point or point_2d) and instance.x is None and not instance.point_source ): # db source if point: current_point = point instance.z = point.z else: current_point = point_2d instance.x = current_point.x instance.y = current_point.y srs = get_srid_obj_from_point(current_point) instance.spatial_reference_system = srs instance.point_source = "P" instance.point_source_item = current_source if not point_2d: instance.point_2d = convert_coordinates_to_point( instance.point.x, instance.point.y, srid=current_point.srid ) modified = True elif ( instance.x and instance.y and instance.spatial_reference_system and instance.spatial_reference_system.auth_name == "EPSG" and instance.spatial_reference_system.srid != 0 ): # form input or already precise try: point_2d = convert_coordinates_to_point( instance.x, instance.y, srid=instance.spatial_reference_system.srid ) except forms.ValidationError: return # irrelevant data in DB distance = 1 # arbitrary if point_2d and instance.point_2d: distance = point_2d.transform(4326, clone=True).distance( instance.point_2d.transform(4326, clone=True) ) if instance.z: point = convert_coordinates_to_point( instance.x, instance.y, instance.z, srid=instance.spatial_reference_system.srid, ) # no change if distance inf to 1 mm if distance >= 0.0001 and ( point_2d != instance.point_2d or point != instance.point ): instance.point = point instance.point_2d = point_2d instance.point_source = "P" instance.point_source_item = current_source modified = True else: instance.point_source = None # get coordinates from parents precise_points = instance.get_precise_points() if not (instance.multi_polygon and instance.multi_polygon_source == "P") and \ precise_points: point_2d, point, source_item = precise_points instance.point_2d = point_2d instance.point = point instance.point_source = "P" instance.point_source_item = source_item instance.x = point_2d.x instance.y = point_2d.y if point: instance.z = point.z srs = get_srid_obj_from_point(point_2d) instance.spatial_reference_system = srs modified = True else: centroid, source, point_source = None, None, None if instance.multi_polygon and instance.multi_polygon_source == "P": source = current_source centroid = instance.multi_polygon.centroid point_source = "M" if not centroid and profile.use_town_for_geo: # try to get from # parent town_centroid = instance.get_town_centroid() if town_centroid: centroid, source = town_centroid point_source = "T" if centroid: instance.point_2d, instance.point_source_item = centroid, source instance.point = None instance.point_source = point_source instance.x = instance.point_2d.x instance.y = instance.point_2d.y srs = get_srid_obj_from_point(instance.point_2d) instance.spatial_reference_system = srs modified = True else: instance.point_2d, instance.point_source_item = None, None instance.point = None instance.point_source = None modified = True if hasattr(instance, "need_update") and instance.need_update: instance.need_update = False modified = True if modified: instance.skip_history_when_saving = True instance._post_saved_geo = True instance._cached_label_checked = False instance.save() if hasattr(instance, "cascade_update"): instance.cascade_update() cache_key, __ = get_cache(sender, ["post_save_geo", instance.pk]) cache.set(cache_key, None, settings.CACHE_TASK_TIMEOUT) return def create_slug(model, name, slug_attr="slug", max_length=100): base_slug = slugify(name) slug = base_slug[:max_length] final_slug = None idx = 1 while not final_slug: if slug and not model.objects.filter(**{slug_attr: slug}).exists(): final_slug = slug break slug = base_slug[: (max_length - 1 - len(str(idx)))] + "-" + str(idx) idx += 1 return final_slug def get_all_field_names(model): return list( set( chain.from_iterable( (field.name, field.attname) if hasattr(field, "attname") else (field.name,) for field in model._meta.get_fields() if not (field.many_to_one and field.related_model is None) ) ) ) def get_all_related_m2m_objects_with_model(model): return [ (f, f.model if f.model != model else None) for f in model._meta.get_fields(include_hidden=True) if f.many_to_many and f.auto_created ] def get_all_related_many_to_many_objects(model): return [ f for f in model._meta.get_fields(include_hidden=True) if f.many_to_many and f.auto_created ] def get_all_related_objects(model): return [ f for f in model._meta.get_fields() if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete ] def num2col(n): string = "" while n > 0: n, remainder = divmod(n - 1, 26) string = chr(65 + remainder) + string return string def merge_tsvectors(vectors): """ Parse tsvector to merge them in one string :param vectors: list of tsvector string :return: merged tsvector """ result_dict = {} for vector in vectors: if not vector: continue current_position = 0 if result_dict: for key in result_dict: max_position = max(result_dict[key]) if max_position > current_position: current_position = max_position for dct_member in vector.split(" "): splitted = dct_member.split(":") key = ":".join(splitted[:-1]) positions = splitted[-1] key = key[1:-1] # remove quotes positions = [int(pos) + current_position for pos in positions.split(",")] if key in result_dict: result_dict[key] += positions else: result_dict[key] = positions # {'lamelie': [1, 42, 5]} => {'lamelie': "1,42,5"} result_dict = { k: ",".join([str(val) for val in result_dict[k]]) for k in result_dict } # {'lamelie': "1,5", "hagarde": "2", "regarde": "4"} => # "'lamelie':1,5 'hagarde':2 'regarde':4" result = " ".join(["'{}':{}".format(k, result_dict[k]) for k in result_dict]) return result def put_session_message(session_key, message, message_type): session = SessionStore(session_key=session_key) messages = [] if "messages" in session: messages = session["messages"][:] messages.append((str(message), message_type)) session["messages"] = messages session.save() def put_session_var(session_key, key, value): session = SessionStore(session_key=session_key) session[key] = value session.save() def get_session_var(session_key, key): session = SessionStore(session_key=session_key) if key not in session: return return session[key] def clean_session_cache(session): # clean session cache cache_key_list = "sessionlist-{}".format(session.session_key) key_list = cache.get(cache_key_list, []) for key in key_list: cache.set(key, None, settings.CACHE_TIMEOUT) cache.set(cache_key_list, [], settings.CACHE_TIMEOUT) def get_field_labels_from_path(model, path): """ :param model: base model :param path: list of attribute starting from the base model :return: list of labels """ labels = [] for key in path: try: field = model._meta.get_field(key) except: labels.append(key) continue if hasattr(field, "verbose_name"): labels.append(field.verbose_name) else: labels.append(key) return labels def create_default_areas(models=None, verbose=False): # can be used on migrations if models are provided if not models: from ishtar_common.models import Area, Town, Department, State else: Area = models["area"] Town = models["town"] Department = models["department"] State = models["state"] areas = {} idx = 0 for state in State.objects.all(): slug = "state-" + slugify(state.label) area, created = Area.objects.get_or_create( txt_idx=slug, defaults={"label": state.label} ) areas["state-{}".format(state.pk)] = area if created: idx += 1 if verbose: print("\n* {} state areas added".format(idx)) idx, idx2 = 0, 0 for dep in Department.objects.all(): slug = "dep-" + slugify(dep.label) area, created = Area.objects.get_or_create( txt_idx=slug, defaults={"label": dep.label} ) areas["dep-" + dep.number] = area if created: idx += 1 if not dep.state_id: continue state_slug = "state-{}".format(dep.state_id) if state_slug not in areas: continue if area.parent and area.parent.pk == areas[state_slug].pk: continue idx2 += 1 area.parent = areas[state_slug] area.save() if verbose: print( "* {} department areas added with {} associations to state".format( idx, idx2 ) ) idx = 0 for town in Town.objects.all(): if not town.numero_insee or len(town.numero_insee) != 5: continue code_dep = "dep-" + town.numero_insee[:2] code_dep_dom = "dep-" + town.numero_insee[:3] if code_dep in areas: if not areas[code_dep].towns.filter(pk=town.pk).count(): areas[code_dep].towns.add(town) idx += 1 elif code_dep_dom in areas: if not areas[code_dep_dom].towns.filter(pk=town.pk).count(): areas[code_dep_dom].towns.add(town) idx += 1 if verbose: print("* {} town associated to department area".format(idx)) def get_relations_for_graph( rel_model, obj_pk, above_relations=None, equal_relations=None, treated=None, styles=None, render_above=True, render_below=True, full=False, ): """ Get all above and equal relations of an object (get all child and parent relations) :param rel_model: the relation model concerned :param obj_pk: id of an object with relations :param above_relations: list of current above_relations :param equal_relations: list of current equal_relations :param treated: treated relation list to prevent circular call :param styles: current styles :param render_above: render relation above the current object :param render_below: render relation below the current object :param full: render the full graph :return: above and equal relations list (each containing lists of two members) """ if not above_relations: above_relations = [] if not equal_relations: equal_relations = [] if not treated: treated = [] if not styles: styles = {} if obj_pk in treated: return above_relations, equal_relations, styles treated.append(obj_pk) for q, inverse in ( ( rel_model.objects.filter( left_record_id=obj_pk, relation_type__logical_relation__isnull=False ), False, ), ( rel_model.objects.filter( right_record_id=obj_pk, relation_type__logical_relation__isnull=False ), True, ), ): q = q.values( "left_record_id", "right_record_id", "relation_type__logical_relation" ) get_above, get_below = render_above, render_below if inverse and (not render_above or not render_below): get_above, get_below = not render_above, not render_below for relation in q.all(): logical_relation = relation["relation_type__logical_relation"] left_record = relation["left_record_id"] right_record = relation["right_record_id"] is_above, is_below = False, False if not logical_relation: continue elif ( get_below and logical_relation == "above" and (left_record, right_record) not in above_relations ): above_relations.append((left_record, right_record)) is_below = True elif ( get_above and logical_relation == "below" and (right_record, left_record) not in above_relations ): above_relations.append((right_record, left_record)) is_above = True elif ( logical_relation == "equal" and (right_record, left_record) not in equal_relations and (left_record, right_record) not in equal_relations ): equal_relations.append((left_record, right_record)) else: continue if right_record == obj_pk: other_record = left_record else: other_record = right_record if get_above and get_below and not full and (is_below or is_above): if (is_above and not inverse) or (is_below and inverse): ar, er, substyles = get_relations_for_graph( rel_model, other_record, above_relations, equal_relations, treated, styles, render_above=True, render_below=False, ) else: ar, er, substyles = get_relations_for_graph( rel_model, other_record, above_relations, equal_relations, treated, styles, render_above=False, render_below=True, ) else: ar, er, substyles = get_relations_for_graph( rel_model, other_record, above_relations, equal_relations, treated, styles, render_above=render_above, render_below=render_below, full=full, ) styles.update(substyles) error_style = "color=red" for r in ar: if r not in above_relations: above_relations.append(r) reverse_rel = tuple(reversed(r)) if reverse_rel in above_relations: # circular if r not in styles: styles[r] = [] if reverse_rel not in styles: styles[reverse_rel] = [] if error_style not in styles[r]: styles[r].append(error_style) if error_style not in styles[reverse_rel]: styles[reverse_rel].append(error_style) if r[0] == r[1]: # same entity if r not in styles: styles[r] = [] if error_style not in styles[r]: styles[r].append("color=red") for r in er: if r not in equal_relations: equal_relations.append(r) return above_relations, equal_relations, styles def generate_relation_graph( obj, highlight_current=True, render_above=True, render_below=True, full=False, debug=False, ): if not settings.DOT_BINARY: return model = obj.__class__ rel_model = model._meta.get_field("right_relations").related_model # get relations above_relations, equal_relations, styles = get_relations_for_graph( rel_model, obj.pk, render_above=render_above, render_below=render_below, full=full, ) # generate dotfile dot_str = "digraph relations {\nnode [shape=box];\n" rel_str = "" described = [] if not above_relations and not equal_relations: rel_str += "subgraph NoDir {\nedge [dir=none,style=dashed];\n" style = 'label="{}"'.format(obj.relation_label) if highlight_current: style += ',style=filled,fillcolor="#C6C0C0"' dot_str += 'item{}[{},href="{}"];\n'.format( obj.pk, style, reverse("display-item", args=[model.SLUG, obj.pk]) ) rel_str += "}\n" for list, directed in ((above_relations, True), (equal_relations, False)): if directed: rel_str += "subgraph Dir {\n" else: rel_str += "subgraph NoDir {\nedge [dir=none,style=dashed];\n" for left_pk, right_pk in list: if left_pk not in described: described.append(left_pk) left = model.objects.get(pk=left_pk) style = 'label="{}"'.format(left.relation_label) if left.pk == obj.pk and highlight_current: style += ',style=filled,fillcolor="#C6C0C0"' dot_str += 'item{}[{},href="{}"];\n'.format( left.pk, style, reverse("display-item", args=[model.SLUG, left.pk]) ) if right_pk not in described: described.append(right_pk) right = model.objects.get(pk=right_pk) style = 'label="{}"'.format(right.relation_label) if right.pk == obj.pk and highlight_current: style += ',style=filled,fillcolor="#C6C0C0"' dot_str += 'item{}[{},href="{}"];\n'.format( right.pk, style, reverse("display-item", args=[model.SLUG, right.pk]), ) if not directed: # on the same level rel_str += "{{rank = same; item{}; item{};}}\n".format( left_pk, right_pk ) style = "" if (left_pk, right_pk) in styles: style = " [{}]".format(", ".join(styles[(left_pk, right_pk)])) rel_str += "item{} -> item{}{};\n".format(left_pk, right_pk, style) rel_str += "}\n" dot_str += rel_str + "\n}" tempdir = tempfile.mkdtemp("-ishtardot") dot_name = tempdir + os.path.sep + "relations.dot" with open(dot_name, "w") as dot_file: dot_file.write(dot_str) if not render_above: suffix = "_below" elif not render_below: suffix = "_above" else: suffix = "" if full and obj.MAIN_UP_MODEL_QUERY and getattr(obj, obj.MAIN_UP_MODEL_QUERY): obj = getattr(obj, obj.MAIN_UP_MODEL_QUERY) with open(dot_name, "r") as dot_file: django_file = File(dot_file) attr = "relation_dot" + suffix getattr(obj, attr).save("relations.dot", django_file, save=True) # execute dot program args = (settings.DOT_BINARY, "-Tsvg", dot_name) svg_tmp_name = tempdir + os.path.sep + "relations.svg" with open(svg_tmp_name, "w") as svg_file: popen = subprocess.Popen(args, stdout=svg_file) popen.wait() # scale image if necessary with open(svg_tmp_name, "r") as svg_file: doc = xmltodict.parse(svg_file.read()) width = doc["svg"]["@width"] if width.endswith("pt"): width = float(width[:-2]) if width > 600: doc["svg"].pop("@height") doc["svg"].pop("@width") doc["svg"]["@preserveAspectRatio"] = "xMinYMin meet" with open(svg_tmp_name, "w") as svg_file: svg_file.write(xmltodict.unparse(doc)) with open(svg_tmp_name, "r") as svg_file: django_file = File(svg_file) attr = "relation_image" + suffix getattr(obj, attr).save("relations.svg", django_file, save=True) png_name = tempdir + os.path.sep + "relations.png" with open(png_name, "wb") as png_file: svg2png(open(svg_tmp_name, "rb").read(), write_to=png_file) with open(png_name, "rb") as png_file: django_file = File(png_file) attr = "relation_bitmap_image" + suffix getattr(obj, attr).save("relations.png", django_file, save=True) if debug: print("DOT file: {}. Tmp SVG file: {}.".format(dot_name, svg_tmp_name)) return shutil.rmtree(tempdir) def create_default_json_fields(model): """ Create default json field configuration in existing database :param model: model concerned """ from ishtar_common.models import JsonDataField def _get_keys(data, current_path=""): keys = [] for key in data.keys(): if type(data[key]) == dict: keys += _get_keys(data[key], current_path + key + "__") continue keys.append(current_path + key) return keys keys = [] for item in model.objects.all(): for key in _get_keys(item.data): if key not in keys: keys.append(key) content_type = ContentType.objects.get_for_model(model) for key in keys: JsonDataField.objects.get_or_create( content_type=content_type, key=key, defaults={ "name": " ".join(key.split("__")).capitalize(), "value_type": "T", "display": False, }, ) def get_urls_for_model( model, views, own=False, autocomplete=False, ): """ Generate get and show url for a model """ urls = [ url( r"show-{}(?:/(?P.+))?/(?P.+)?$".format(model.SLUG), check_rights(["view_" + model.SLUG, "view_own_" + model.SLUG])( getattr(views, "show_" + model.SLUG) ), name="show-" + model.SLUG, ), url( r"^display-{}/(?P.+)/$".format(model.SLUG), check_rights(["view_" + model.SLUG, "view_own_" + model.SLUG])( getattr(views, "display_" + model.SLUG) ), name="display-" + model.SLUG, ), ] if own: urls += [ url( r"get-{}/own/(?P.+)?$".format(model.SLUG), check_rights(["view_" + model.SLUG, "view_own_" + model.SLUG])( getattr(views, "get_" + model.SLUG) ), name="get-own-" + model.SLUG, kwargs={"force_own": True}, ), ] urls += [ url( r"get-{}/(?P.+)?$".format(model.SLUG), check_rights(["view_" + model.SLUG, "view_own_" + model.SLUG])( getattr(views, "get_" + model.SLUG) ), name="get-" + model.SLUG, ), ] if autocomplete: urls += [ url( r"autocomplete-{}/$".format(model.SLUG), check_rights(["view_" + model.SLUG, "view_own_" + model.SLUG])( getattr(views, "autocomplete_" + model.SLUG) ), name="autocomplete-" + model.SLUG, ), ] return urls def m2m_historization_changed(sender, **kwargs): obj = kwargs.get("instance", None) if not obj: return hist_values = obj.history_m2m or {} for attr in obj.HISTORICAL_M2M: values = [] for value in getattr(obj, attr).all(): if not hasattr(value, "history_compress"): continue values.append(value.history_compress()) hist_values[attr] = values obj.history_m2m = hist_values if getattr(obj, "skip_history_when_saving", False): # assume the last modifier is good... q = obj.history.filter( history_modifier_id=obj.history_modifier_id, ).order_by("-history_date", "-history_id") if q.count(): hist = q.all()[0] hist.history_m2m = hist_values hist.history_date = hist.last_modified = datetime.datetime.now() hist.save() obj.skip_history_when_saving = True elif not obj.history_modifier: obj.skip_history_when_saving = True obj.save() def max_size_help(): msg = str(_("The maximum supported file size is {} Mo.")).format( settings.MAX_UPLOAD_SIZE ) return msg def find_all_symlink(dirname): for name in os.listdir(dirname): if name not in (os.curdir, os.pardir): full = os.path.join(dirname, name) if os.path.islink(full): yield full, os.readlink(full) def get_model_by_slug(slug): all_models = apps.get_models() for model in all_models: if hasattr(model, "SLUG") and model.SLUG == slug: return model return MEDIA_RE = [ re.compile(r"_[a-zA-Z0-9]{7}\-[0-9]"), re.compile(r"_[a-zA-Z0-9]{7}"), ] def get_broken_links(path): for root, dirs, files in os.walk(path): for filename in files: path = os.path.join(root, filename) if os.path.islink(path): target_path = os.readlink(path) # resolve relative symlinks if not os.path.isabs(target_path): target_path = os.path.join(os.path.dirname(path), target_path) if not os.path.exists(target_path): yield path def simplify_name(full_path_name, check_existing=False, min_len=15): """ Simplify a file name by removing auto save suffixes :param full_path_name: full path name :param check_existing: prevent to give name of an existing file :param min_len: minimum lenght of the base filename :return: """ name_exp = full_path_name.split(os.sep) path = os.sep.join(name_exp[0:-1]) name = name_exp[-1] current_name = name[:] ext = "" if "." in name: # remove extension if have one names = name.split(".") name = ".".join(names[0:-1]) ext = "." + names[-1] while "_" in name and len(name) > min_len: oldname = name[:] for regex in MEDIA_RE: match = None for m in regex.finditer(name): # get the last match match = m if match: new_name = name.replace(match.group(), "") full_new_name = os.sep.join([path, new_name + ext]) try: is_file = os.path.isfile(full_new_name) except UnicodeEncodeError: is_file = os.path.isfile(full_new_name.encode("utf-8")) if not check_existing or not is_file: # do not take the place of another file name = new_name[:] break if oldname == name: break return path, current_name, name + ext def rename_and_simplify_media_name(full_path_name, rename=True): """ Simplify the name if possible :param full_path_name: full path name :param rename: rename file if True (default: True) :return: new full path name (or old if not changed), modified """ try: exists = os.path.exists(full_path_name) is_file = os.path.isfile(full_path_name) except UnicodeEncodeError: full_path_name_unicode = full_path_name.encode("utf-8") exists = os.path.exists(full_path_name_unicode) is_file = os.path.isfile(full_path_name_unicode) if not exists or not is_file: return full_path_name, False path, current_name, name = simplify_name(full_path_name, check_existing=True) if current_name == name: return full_path_name, False full_new_name = os.sep.join([path, name]) if rename: os.rename(full_path_name, full_new_name) return full_new_name, True def get_file_fields(): """ Get all fields which are inherited from FileField """ all_models = apps.get_models() fields = [] for model in all_models: for field in model._meta.get_fields(): if isinstance(field, models.FileField): fields.append(field) return fields def get_used_media( exclude=None, limit=None, return_object_and_field=False, debug=False ): """ Get media which are still used in models :param exclude: exclude fields, ex: ['ishtar_common.Import.imported_file', 'ishtar_common.Import.imported_images'] :param limit: limit to some fields :param return_object_and_field: return associated object and field name :return: list of media filename or if return_object_and_field is set to True return (object, file field name, media filename) """ if return_object_and_field: media = [] else: media = set() for field in get_file_fields(): if exclude and str(field) in exclude: continue if limit and str(field) not in limit: continue is_null = {"%s__isnull" % field.name: True} is_empty = {"%s" % field.name: ""} storage = field.storage if debug: print("") q = ( field.model.objects.values("id", field.name) .exclude(**is_empty) .exclude(**is_null) ) ln = q.count() for idx, res in enumerate(q): value = res[field.name] if debug: sys.stdout.write("* get_used_media {}: {}/{}\r".format(field, idx, ln)) sys.stdout.flush() if value not in EMPTY_VALUES: if return_object_and_field: media.append( ( field.model.objects.get(pk=res["id"]), field.name, storage.path(value), ) ) else: media.add(storage.path(value)) return media def get_all_media(exclude=None, debug=False): """ Get all media from MEDIA_ROOT """ if not exclude: exclude = [] media = set() full_dirs = list(os.walk(six.text_type(settings.MEDIA_ROOT))) ln_full = len(full_dirs) for idx_main, full_dir in enumerate(full_dirs): root, dirs, files = full_dir ln = len(files) if debug: print("") for idx, name in enumerate(files): if debug: sys.stdout.write( "* get_all_media {} ({}/{}): {}/{}\r".format( root.encode("utf-8"), idx_main, ln_full, idx, ln ) ) sys.stdout.flush() path = os.path.abspath(os.path.join(root, name)) relpath = os.path.relpath(path, settings.MEDIA_ROOT) in_exclude = False for e in exclude: if re.match(r"^%s$" % re.escape(e).replace("\\*", ".*"), relpath): in_exclude = True break if not in_exclude: media.add(path) else: if debug: sys.stdout.write( "* get_all_media {} ({}/{})\r".format( root.encode("utf-8"), idx_main, ln_full ) ) return media def get_unused_media(exclude=None): """ Get media which are not used in models """ if not exclude: exclude = [] all_media = get_all_media(exclude) used_media = get_used_media() return [x for x in all_media if x not in used_media] def remove_unused_media(): """ Remove unused media """ remove_media(get_unused_media()) def remove_media(files): """ Delete file from media dir """ for filename in files: os.remove(os.path.join(settings.MEDIA_ROOT, filename)) def remove_empty_dirs(path=None): """ Recursively delete empty directories; return True if everything was deleted. """ if not path: path = settings.MEDIA_ROOT if not os.path.isdir(path): return False listdir = [os.path.join(path, filename) for filename in os.listdir(path)] if all(list(map(remove_empty_dirs, listdir))): os.rmdir(path) return True else: return False def _try_copy(path, f, dest, simplified_ref_name, make_copy, min_len): full_file = os.path.abspath(os.sep.join([path, f])) # must be a file if not os.path.isfile(full_file) or full_file == dest: return _, _, name = simplify_name(full_file, check_existing=False, min_len=min_len) if simplified_ref_name.lower() == name.lower(): # a candidate is found if make_copy: try: os.remove(dest) except OSError: pass try: shutil.copy2(full_file, dest) except OSError: return return f def try_fix_file(filename, make_copy=True, hard=False): """ Try to find a file with a similar name on the same dir. :param filename: filename (full path) :param make_copy: make the copy of the similar file found :param hard: search on the whole media dir :return: name of the similar file found or None """ filename = os.path.abspath(filename) path, current_name, simplified_ref_name = simplify_name( filename, check_existing=False, min_len=10 ) try: dirs = list(sorted(os.listdir(path))) except UnicodeDecodeError: dirs = list(sorted(os.listdir(path.encode("utf-8")))) # check existing files in the path for f in dirs: result = _try_copy( path, f, filename, simplified_ref_name, make_copy, min_len=10 ) if result: return result if not hard: return for path, __, files in os.walk(settings.MEDIA_ROOT): for f in files: result = _try_copy( path, f, filename, simplified_ref_name, make_copy, min_len=10 ) if result: return result def get_current_profile(force=False): IshtarSiteProfile = apps.get_model("ishtar_common", "IshtarSiteProfile") return IshtarSiteProfile.get_current_profile(force=force) PARSE_FORMULA = re.compile("{([^}]*)}") PARSE_JINJA = re.compile("{{([^}]*)}") PARSE_JINJA_IF = re.compile("{% if ([^}]*)}") def _deduplicate(value): new_values = [] for v in value.split("-"): if v not in new_values: new_values.append(v) return "-".join(new_values) FORMULA_FILTERS = { "upper": lambda x: x.upper(), "lower": lambda x: x.lower(), "capitalize": lambda x: x.capitalize(), "slug": lambda x: slugify(x), "deduplicate": _deduplicate, } def _update_gen_id_dct(item, dct, initial_key, fkey=None, filters=None): if not fkey: fkey = initial_key[:] if fkey.startswith("settings__"): dct[fkey] = getattr(settings, fkey[len("settings__") :]) or "" return obj = item for k in fkey.split("__"): try: obj = getattr(obj, k) except (ObjectDoesNotExist, AttributeError): obj = None if hasattr(obj, "all") and hasattr(obj, "count"): # query manager if not obj.count(): break obj = obj.all()[0] elif callable(obj): obj = obj() if obj is None: break if obj is None: dct[initial_key] = "" else: dct[initial_key] = str(obj) if filters: for filtr in filters: dct[initial_key] = filtr(dct[initial_key]) def get_generated_id(key, item): profile = get_current_profile() if not hasattr(profile, key): return formula = getattr(profile, key) dct = {} # jinja2 style if "{{" in formula or "{%" in formula: # naive parse - only simple jija2 is managed key_list = [] for key in PARSE_JINJA.findall(formula): key = key.strip().split("|")[0] key_list.append(key) for keys in PARSE_JINJA_IF.findall(formula): sub_key_list = keys.split(" or ") res = [] for keys2 in sub_key_list: res += keys2.split(" and ") key_list += [k.split(" ")[0] for k in res] key_list = map(lambda x: x.strip(), key_list) new_keys = [] for key in key_list: if key.startswith("not "): key = key[len("not ") :].strip() key = key.split(".")[0] if " % " in key: keys = key.split(" % ")[1] keys = [ i.replace("(", "").replace(")", "").split("|")[0].strip() for i in keys.split(",") ] else: keys = [key] new_keys += keys key_list = new_keys for key in set(key_list): _update_gen_id_dct(item, dct, key) tpl = Template(formula) values = tpl.render(dct).split("||") else: for fkey in PARSE_FORMULA.findall(formula): filtered = fkey.split("|") initial_key = fkey[:] fkey = filtered[0] filters = [] for filtr in filtered[1:]: if filtr in FORMULA_FILTERS: filters.append(FORMULA_FILTERS[filtr]) _update_gen_id_dct(item, dct, initial_key, fkey, filters=filters) values = formula.format(**dct).split("||") value = values[0] for filtr in values[1:]: if filtr not in FORMULA_FILTERS: value += "||" + filtr continue value = FORMULA_FILTERS[filtr](value) return value PRIVATE_FIELDS = ("id", "history_modifier", "order", "uuid") def duplicate_item(item, user=None, data=None): model = item.__class__ new = model.objects.get(pk=item.pk) for field in model._meta.fields: # pk is in PRIVATE_FIELDS so: new.pk = None and a new # item will be created on save if field.name == "uuid": new.uuid = uuid.uuid4() elif field.name in PRIVATE_FIELDS: setattr(new, field.name, None) if user: new.history_user = user if data: for k in data: setattr(new, k, data[k]) new.save() if hasattr(user, "user_ptr"): if hasattr(new, "history_creator"): new.history_creator = user.user_ptr if hasattr(new, "history_modifier"): new.history_modifier = user.user_ptr new.save() # m2m fields m2m = [ field.name for field in model._meta.many_to_many if field.name not in PRIVATE_FIELDS ] for field in m2m: for val in getattr(item, field).all(): if val not in getattr(new, field).all(): getattr(new, field).add(val) return new def get_image_path(instance, filename): # when using migrations instance is not a real ImageModel instance if not hasattr(instance, "_get_image_path"): n = datetime.datetime.now() return "upload/{}/{:02d}/{:02d}/{}".format(n.year, n.month, n.day, filename) return instance._get_image_path(filename)