#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): this module was recovered from a whitespace-mangled copy;
# formatting below is reconstructed. Two spots marked NOTE(review) lost
# characters at extraction and must be restored from version control.

from copy import copy, deepcopy
import csv
import datetime
import virtualtime
import json
import logging
import re
import subprocess
from tempfile import NamedTemporaryFile

from django.conf import settings
from django.contrib.gis.geos import GEOSException
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import Q, ImageField
from django.db.models.fields import FieldDoesNotExist
from django.http import HttpResponse
from django.shortcuts import render
from django.template import loader
from django.utils.translation import ugettext, ugettext_lazy as _

from tidylib import tidy_document as tidy
from unidecode import unidecode
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration

from ishtar_common.utils import check_model_access_control, CSV_OPTIONS, \
    get_all_field_names
from ishtar_common.models import HistoryError, get_current_profile, \
    PRIVATE_FIELDS, GeneralType
from menus import Menu
import models

from archaeological_files.models import File
from archaeological_operations.models import Operation, ArchaeologicalSite, \
    AdministrativeAct
from archaeological_context_records.models import ContextRecord
from archaeological_finds.models import Find, FindBasket, Treatment, \
    TreatmentFile
from archaeological_warehouse.models import Warehouse

logger = logging.getLogger(__name__)

ENCODING = settings.ENCODING or 'utf-8'

# (session key of the "current item", associated model)
CURRENT_ITEM_KEYS = (
    ('file', File),
    ('operation', Operation),
    ('site', ArchaeologicalSite),
    ('contextrecord', ContextRecord),
    ('warehouse', Warehouse),
    ('find', Find),
    ('treatmentfile', TreatmentFile),
    ('treatment', Treatment),
    ('administrativeact', AdministrativeAct),
    ('administrativeactop', AdministrativeAct),
    ('administrativeactfile', AdministrativeAct),
    ('administrativeacttreatment', AdministrativeAct),
    ('administrativeacttreatmentfile', AdministrativeAct),
)

CURRENT_ITEM_KEYS_DICT = dict(CURRENT_ITEM_KEYS)


def get_autocomplete_item(model, extra=None):
    """
    Build an autocomplete view for ``model``.

    Each space-separated fragment of the "term" GET parameter must match
    ``cached_label`` (icontains); ``extra`` adds fixed filter criteria.
    Returns at most 20 items as a JSON [{'id': pk, 'value': label}] list.
    """
    if not extra:
        extra = {}

    def func(request, current_right=None):
        q = request.GET.get('term') or ""
        query = Q(**extra)
        for q in q.split(' '):
            if not q:
                continue
            query = query & Q(cached_label__icontains=q)
        limit = 20
        objects = model.objects.filter(query)[:limit]
        data = json.dumps([{'id': obj.pk, 'value': obj.cached_label}
                           for obj in objects])
        return HttpResponse(data, content_type='text/plain')
    return func


def check_permission(request, action_slug, obj_id=None):
    """
    Check that the current user may perform the action ``action_slug``
    (optionally on the object ``obj_id``), using the main menu item
    availability rules.
    """
    MAIN_MENU = Menu(request.user)
    MAIN_MENU.init()
    if action_slug not in MAIN_MENU.items:
        # TODO
        return True
    if obj_id:
        return MAIN_MENU.items[action_slug].is_available(
            request.user, obj_id, session=request.session)
    return MAIN_MENU.items[action_slug].can_be_available(
        request.user, session=request.session)


def new_item(model, frm, many=False):
    """
    Build a popup-window view creating a new ``model`` item with form
    ``frm``. Permission "add_<model>" is checked first.
    """
    def func(request, parent_name, limits=''):
        model_name = model._meta.object_name
        if not check_permission(request, 'add_' + model_name.lower()):
            not_permitted_msg = ugettext(u"Operation not permitted.")
            return HttpResponse(not_permitted_msg)
        dct = {'title': unicode(_(u'New %s' % model_name.lower())),
               'many': many}
        if request.method == 'POST':
            dct['form'] = frm(request.POST, limits=limits)
            if dct['form'].is_valid():
                new_item = dct['form'].save(request.user)
                dct['new_item_label'] = unicode(new_item)
                dct['new_item_pk'] = new_item.pk
                dct['parent_name'] = parent_name
                dct['parent_pk'] = parent_name
                # "xxx_select_yyy" widget ids: strip the "select" part
                if dct['parent_pk'] and '_select_' in dct['parent_pk']:
                    parents = dct['parent_pk'].split('_')
                    dct['parent_pk'] = "_".join([parents[0]] + parents[2:])
                return render(request, 'window.html', dct)
        else:
            dct['form'] = frm(limits=limits)
        return render(request, 'window.html', dct)
    return func


def display_item(model, extra_dct=None, show_url=None):
    """
    Build a view redirecting to the "show" URL of an item (rendered in
    the generic display_item template).
    """
    def func(request, pk, **dct):
        if show_url:
            dct['show_url'] = "/{}{}/".format(show_url, pk)
        else:
            dct['show_url'] = "/show-{}/{}/".format(model.SLUG, pk)
        return render(request, 'ishtar/display_item.html', dct)
    return func


def show_item(model, name, extra_dct=None):
    """
    Build a view rendering the detail sheet of a ``model`` item as HTML
    (default), ODT (via tidy + pandoc) or PDF (via WeasyPrint).

    Access control honours "own" restrictions; an optional "date" keyword
    displays a past historic version of the item.
    """
    def func(request, pk, **dct):
        allowed, own = check_model_access_control(request, model)
        if not allowed:
            return HttpResponse('', content_type="application/xhtml")
        q = model.objects
        if own:
            # restrict to items owned by the current ishtar user
            if not hasattr(request.user, 'ishtaruser'):
                return HttpResponse('NOK')
            query_own = model.get_query_owns(request.user.ishtaruser)
            if query_own:
                q = q.filter(query_own).distinct()
        try:
            item = q.get(pk=pk)
        except ObjectDoesNotExist:
            return HttpResponse('NOK')
        doc_type = 'type' in dct and dct.pop('type')
        url_name = u"/".join(reverse('show-' + name, args=['0', '']
                                     ).split('/')[:-2]) + u"/"
        dct['CURRENCY'] = get_current_profile().currency
        dct['ENCODING'] = settings.ENCODING
        dct['DOT_GENERATION'] = settings.DOT_BINARY and True
        dct['current_window_url'] = url_name
        date = None
        if 'date' in dct:
            date = dct.pop('date')
        dct['sheet_id'] = "%s-%d" % (name, item.pk)
        dct['window_id'] = "%s-%d-%s" % (
            name, item.pk, datetime.datetime.now().strftime('%M%s'))
        # list current perms
        if hasattr(request.user, 'ishtaruser') and request.user.ishtaruser:
            cache_key = u"{}-{}-{}".format(
                settings.PROJECT_SLUG,
                "current-perms",
                request.session.session_key,
            )
            permissions = cache.get(cache_key)
            if permissions is None:
                permissions = []
                profile = request.user.ishtaruser.person.current_profile
                for group in profile.profile_type.groups.all():
                    for permission in group.permissions.all():
                        permissions.append(permission.codename)
                cache.set(cache_key, permissions, settings.CACHE_TIMEOUT)
            for perm in permissions:
                dct["permission_" + perm] = True
        if hasattr(item, 'history'):
            if date:
                # display a past version of the item
                try:
                    date = datetime.datetime.strptime(
                        date, '%Y-%m-%dT%H:%M:%S.%f')
                    item = item.get_previous(date=date)
                    assert item is not None
                except (ValueError, AssertionError):
                    return HttpResponse(None, content_type='text/plain')
                dct['previous'] = item._previous
                dct['next'] = item._next
            else:
                historized = item.history.all()
                if historized:
                    item.history_date = historized[0].history_date
                    if len(historized) > 1:
                        dct['previous'] = historized[1].history_date
        dct['item'], dct['item_name'] = item, name
        # add context
        if extra_dct:
            dct.update(extra_dct(request, item))
        context_instance = deepcopy(dct)
        context_instance['output'] = 'html'
        if hasattr(item, 'history_object'):
            filename = item.history_object.associated_filename
        else:
            filename = item.associated_filename
        if doc_type == "odt" and settings.ODT_TEMPLATE:
            tpl = loader.get_template('ishtar/sheet_%s.html' % name)
            context_instance['output'] = 'ODT'
            content = tpl.render(context_instance, request)
            tidy_options = {'output-xhtml': 1, 'indent': 1, 'tidy-mark': 0,
                            'doctype': 'auto', 'add-xml-decl': 1, 'wrap': 1}
            html, errors = tidy(content, options=tidy_options)
            # NOTE(review): the first argument below was garbled at
            # extraction — presumably an '&nbsp;' entity; confirm in VCS
            html = html.encode('utf-8').replace(" ", " ")
            # NOTE(review): the regex below lost an HTML-tag-like part of
            # its pattern at extraction ('<TAG([^>]*)>\n'); restore from VCS
            html = re.sub('
]*)>\n', '', html)
            odt = NamedTemporaryFile()
            html_source = NamedTemporaryFile()
            with open(html_source.name, 'w') as html_file:
                html_file.write(html)
            # convert the tidied HTML sheet to ODT with pandoc
            pandoc_args = ["pandoc", "-f", "html", "-t", "odt",
                           "-o", odt.name, html_source.name]
            try:
                subprocess.check_call(pandoc_args)
            except subprocess.CalledProcessError:
                # conversion failed: fall back to the HTML sheet
                return HttpResponse(content,
                                    content_type="application/xhtml")
            response = HttpResponse(
                content_type='application/vnd.oasis.opendocument.text')
            response['Content-Disposition'] = \
                'attachment; filename={}.odt'.format(filename)
            with open(odt.name, 'r') as odt_file:
                response.write(odt_file.read())
            return response
        elif doc_type == 'pdf':
            # scheme://host of the current request, used to resolve static
            # resources referenced by the stylesheet
            base_url = "/".join(
                request.build_absolute_uri().split("/")[0:3]
            )
            tpl = loader.get_template('ishtar/sheet_%s_pdf.html' % name)
            context_instance['output'] = 'PDF'
            html = tpl.render(context_instance, request)
            font_config = FontConfiguration()
            css = CSS(string='''
                @font-face {
                    font-family: Gentium;
                    src: url(%s);
                }
                body{
                    font-family: Gentium
                }
            ''' % (base_url + static("gentium/GentiumPlus-R.ttf")))
            css2 = CSS(filename=settings.STATIC_ROOT +
                       '/media/style_basic.css')
            pdf = HTML(
                string=html, base_url=base_url
            ).write_pdf(
                stylesheets=[css, css2], font_config=font_config)
            response = HttpResponse(pdf, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=%s.pdf' % \
                filename
            return response
        else:
            tpl = loader.get_template('ishtar/sheet_%s_window.html' % name)
            content = tpl.render(context_instance, request)
            return HttpResponse(content, content_type="application/xhtml")
    return func
def revert_item(model):
    """
    Build a view restoring an item of ``model`` to a past history state.

    The view expects the primary key and an ISO-formatted date; any
    failure (unknown pk, bad date, history error) yields an empty
    response, success yields "True".
    """
    def func(request, pk, date, **dct):
        try:
            target = model.objects.get(pk=pk)
            when = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f')
            target.rollback(when)
        except (ObjectDoesNotExist, ValueError, HistoryError):
            return HttpResponse(None, content_type='text/plain')
        return HttpResponse("True", content_type='text/plain')
    return func
# number of parent/children levels walked when expanding a hierarchic
# search criterion (see _manage_hierarchic_fields)
HIERARCHIC_LEVELS = 5
# search keys treated as hierarchic general types
HIERARCHIC_FIELDS = ['periods', 'period', 'unit', 'material_types',
                     'material_type', 'conservatory_state', 'object_types']
def _get_values(request, val):
if hasattr(val, 'all'): # manage related objects
vals = list(val.all())
else:
vals = [val]
new_vals = []
for v in vals:
if callable(v):
v = v()
if hasattr(v, 'url'):
v = request.is_secure() and \
'https' or 'http' + '://' + \
request.get_host() + v.url
new_vals.append(v)
return new_vals
def _push_to_list(obj, current_group, depth):
    """
    parse_parentheses helper function

    Append ``obj`` to the sub-list of ``current_group`` nested ``depth``
    levels deep (always following the last item of each level).
    Consecutive string fragments are concatenated rather than appended
    as separate items.
    """
    try:
        # walk down to the list nested "depth" levels deep
        while depth > 0:
            current_group = current_group[-1]
            depth -= 1
    except IndexError:
        # tolerant to parentheses mismatch
        pass
    if current_group and type(obj) in (unicode, str) and \
            type(current_group[-1]) in (unicode, str):
        # merge consecutive text fragments into one string
        current_group[-1] += obj
    else:
        current_group.append(obj)
def _parse_parentheses(s):
    """
    Parse parentheses into list.
    (OA01 & (pierre | ciseau)) -> ["0A01 &", ["pierre | ciseau"]]

    Characters are pushed one by one into the nested ``groups`` structure;
    double quotes toggle a flag so that parentheses inside quotes are
    treated as plain text.
    """
    groups = []
    depth = 0
    inside_quote = False
    for char in s:
        if char == u'"':
            inside_quote = not inside_quote
        if not inside_quote:
            if char == u'(':
                # open a new nested group
                _push_to_list([], groups, depth)
                depth += 1
                # NOTE(review): '(' then falls through and is pushed as
                # text into the new group — the docstring example suggests
                # it should be skipped (missing `continue`?); confirm
            elif char == u')':
                if depth > 0:
                    depth -= 1
                else:
                    # unbalanced closing parenthesis: keep it as text
                    _push_to_list(char, groups, depth)
                continue
        _push_to_list(char, groups, depth)
    # for non tolerant to parentheses mismatch check depth is equal to 0
    return groups
# characters always stripped from free-text search tokens
FORBIDDEN_CHAR = [u":"]
# operator characters, kept only when the token IS the operator itself
RESERVED_CHAR = [u"|", u"&"]
def _parse_query_string(string, request_keys, current_dct, exc_dct):
string = string.strip().lower()
if u"=" in string:
splited = string.split(u"=")
if len(splited) == 2:
term, query = splited
excluded = term.startswith(u"-")
if excluded:
term = term[1:]
if term in request_keys:
term = request_keys[term]
dct = current_dct
if excluded:
dct = exc_dct
if term in dct:
dct[term] += u";" + query
else:
dct[term] = query
return u""
for reserved_char in FORBIDDEN_CHAR:
string = string.replace(reserved_char, u"")
if len(string) != 1:
for reserved_char in RESERVED_CHAR:
string = string.replace(reserved_char, u"")
# like search
if string.endswith(u'*'):
if len(string.strip()) == 1:
return u""
string = string[:-1] + u':*'
if string.startswith(u'-'):
if len(string.strip()) == 1:
return u""
string = u"!" + string[1:]
return string
def _parse_parentheses_groups(groups, request_keys, current_dct=None,
                              exc_dct=None):
    """
    Transform parentheses groups to query
    :param groups: groups to transform (list)
    :param request_keys: request keys for facet search
    :param current_dct: query dict
    :param exc_dct: exclude query dict
    :return: query string, query dict, excluded query dict
    """
    if not current_dct:
        current_dct = {}
    if not exc_dct:
        exc_dct = {}
    if type(groups) is not list:
        # leaf case: a plain string token (or space-separated tokens)
        string = groups.strip()
        if string.startswith(u'"') and string.endswith(u'"') and \
                string.count(u'"') == 2:
            string = string[1:-1]
        # split into many groups if spaces
        # do not split inside quotes
        current_index = 0
        found = string.find('"', current_index)
        # NOTE(review): SEP looks mojibake-garbled; it was probably an
        # exotic unicode marker unlikely to occur in user input — confirm
        SEP = u"?รง;?"  # replace spaces inside quote with this characters
        previous_quote = None
        while found != -1:
            if previous_quote is not None:
                # closing quote found: protect the spaces in between
                string = string[0:previous_quote] + \
                    string[previous_quote:found].replace(u' ', SEP) + \
                    string[found:]
                previous_quote = None
                # SEP is larger than a space
                found = string.find('"', current_index)
            else:
                previous_quote = found
                current_index = found + 1
                found = string.find('"', current_index)
        string_groups = [gp.replace(SEP, u" ") for gp in string.split(u" ")]
        if len(string_groups) == 1:
            # single token: normalize it
            return _parse_query_string(
                string_groups[0], request_keys, current_dct, exc_dct), \
                current_dct, exc_dct
        # several tokens: recurse on them as a list
        return _parse_parentheses_groups(string_groups,
                                         request_keys, current_dct, exc_dct)
    if not groups:  # empty list
        return "", current_dct, exc_dct
    # list case: recurse on each member and join with operators
    query = u"("
    previous_sep, has_item = None, False
    for item in groups:
        q, current_dct, exc_dct = _parse_parentheses_groups(
            item, request_keys, current_dct, exc_dct)
        q = q.strip()
        if not q:
            continue
        if q in (u"|", u"&"):
            if previous_sep or not has_item:
                continue  # multiple sep is not relevant
            previous_sep = q
            continue
        if has_item:
            # default to AND when no explicit operator was given
            if previous_sep:
                query += previous_sep
            else:
                query += u" & "
        query += q
        has_item = True
        previous_sep = None
    query += u")"
    if query == u"()":
        query = u""
    return unidecode(query), current_dct, exc_dct
def _search_manage_search_vector(model, dct, exc_dct, request_keys):
    """
    Turn the free-text 'search_vector' criterion into a PostgreSQL
    full-text condition plus extra facet criteria.

    "key=value" terms found inside the search string are moved into
    ``dct`` / ``exc_dct``; the remaining text is appended to
    ``dct['extras']`` as a raw to_tsquery() condition for QuerySet.extra().

    :return: (criteria dict, excluded criteria dict)
    """
    if 'search_vector' not in dct:
        return dct, exc_dct
    search_vector = dct['search_vector']
    parentheses_groups = _parse_parentheses(search_vector)
    search_query, extra_dct, extra_exc_dct = _parse_parentheses_groups(
        parentheses_groups, request_keys)
    dct.update(extra_dct)
    exc_dct.update(extra_exc_dct)
    if search_query:
        # remove inside parenthesis
        search_query = \
            search_query.replace(u'(', u'').replace(u')', u'').strip()
        if search_query:
            # raw SQL full-text condition; parameters are passed
            # separately so the query string is not interpolated here
            dct['extras'].append(
                {'where': [model._meta.db_table +
                           ".search_vector @@ (to_tsquery(%s, %s)) = true"],
                 'params': [settings.ISHTAR_SEARCH_LANGUAGE,
                            search_query]}
            )
    return dct, exc_dct
def _manage_bool_fields(model, bool_fields, reversed_bool_fields, dct, or_reqs):
    """
    Convert raw boolean criteria to real booleans in the search dict.

    "1" means "indifferent" (criterion dropped); "2"/"yes"/translated
    "Yes" means True, anything else False. Keys in ``reversed_bool_fields``
    are negated. For ImageField "__isnull" criteria an empty-string check
    is added (an empty ImageField is '' in database, not NULL).
    """
    bool_fields = list(bool_fields) + list(reversed_bool_fields)
    for k in bool_fields:
        if k in dct:
            if dct[k] == u"1":
                # "indifferent": drop the criterion
                dct.pop(k)
            else:
                dct[k] = dct[k].replace(u'"', u'')
                if dct[k] in [u"2", u"yes", unicode(_(u"Yes")).lower()]:
                    dct[k] = True
                else:
                    dct[k] = False
                if k in reversed_bool_fields:
                    dct[k] = not dct[k]
                # check also for empty value with image field
                field_name = k.split('__')[0]
                # TODO: can be improved in later version of Django
                try:
                    c_field = model._meta.get_field(field_name)
                    if k.endswith('__isnull') and \
                            isinstance(c_field, ImageField):
                        if dct[k]:
                            # "is null" must also match empty strings
                            or_reqs.append(
                                (k, {k.split('__')[0] + '__exact': ''}))
                        else:
                            # "is not null": at least one character stored
                            dct[k.split('__')[0] + '__regex'] = '.{1}.*'
                except FieldDoesNotExist:
                    pass
def _manage_dated_fields(dated_fields, dct):
for k in dated_fields:
if k in dct:
if not dct[k]:
dct.pop(k)
try:
items = dct[k].replace('"', '').split('/')
assert len(items) == 3
dct[k] = virtualtime.datetime(*map(lambda x: int(x),
reversed(items))) \
.strftime('%Y-%m-%d')
except AssertionError:
dct.pop(k)
def _clean_type_val(val):
    """
    Clean a search value: remove GeneralType prefix codes, strip outer
    whitespace and, for quoted values, whitespace just inside the quotes.
    """
    for prefix_code in GeneralType.PREFIX_CODES:
        val = val.replace(prefix_code, u"")
    stripped = val.strip()
    is_quoted = stripped.startswith(u'"') and stripped.endswith(u'"')
    if is_quoted:
        return u'"{}"'.format(stripped[1:-1].strip())
    return stripped
def _manage_facet_search(model, dct, and_reqs):
    """
    Convert quoted general-type criteria into label-based Q filters.

    A fully quoted value on a "<type>__pk" key means "search by label"
    instead of by primary key; ";"-separated values are OR-ed. Model
    POST_PROCESS_REQUEST hooks are then applied to the remaining criteria.

    :param model: model searched
    :param dct: criteria dict, modified in place
    :param and_reqs: list of Q objects AND-ed to the final query
    """
    if not hasattr(model, "general_types"):
        return
    general_types = model.general_types()
    for base_k in general_types:
        if base_k in HIERARCHIC_FIELDS:  # already managed
            continue
        k = base_k + "__pk"
        # bug fix: the second condition duplicated startswith — the value
        # must start AND end with a double quote to be label-searched
        if k not in dct or not dct[k].startswith(u'"') \
                or not dct[k].endswith(u'"'):
            continue
        val = _clean_type_val(dct.pop(k))
        if u";" in val:
            # OR request
            values = val.split(u";")
        else:
            values = [val]
        reqs = None
        for val in values:
            # bug fix: startswith(u"") was always True — check for the
            # opening quote as for the closing one
            if not val.endswith(u'"') or not val.startswith(u'"'):
                continue
            # NOTE(review): the wildcard test uses "%" while "*" is the
            # character stripped below — confirm which one is expected
            suffix = "__label__icontains" if u"%" in val else \
                "__label__iexact"
            query = val[1:-1].replace(u'*', u"")
            if not reqs:
                reqs = Q(**{base_k + suffix: query})
            else:
                reqs |= Q(**{base_k + suffix: query})
        if reqs:
            and_reqs.append(reqs)
    POST_PROCESS_REQUEST = getattr(model, 'POST_PROCESS_REQUEST', None)
    if not POST_PROCESS_REQUEST:
        return
    for k in dct:
        if k in POST_PROCESS_REQUEST and dct[k]:
            dct[k] = getattr(model, POST_PROCESS_REQUEST[k])(
                dct[k].replace(u'"', ''))
def _manage_hierarchic_fields(dct, and_reqs):
    """
    Expand hierarchic criteria (areas, towns, hierarchic general types)
    into Q objects also matching parents/children up to
    HIERARCHIC_LEVELS levels.

    Matched keys are popped from ``dct`` and the resulting OR-ed Q is
    appended to ``and_reqs``.
    """
    for reqs in dct.copy():
        if type(reqs) not in (list, tuple):
            reqs = [reqs]
        for req in reqs:
            # areas: match the area itself, its parents and its children
            if req.endswith('areas__pk') \
                    or req.endswith('areas__label__iexact'):
                if req.endswith('pk'):
                    suffix = 'pk'
                elif req.endswith('label__iexact'):
                    suffix = 'label__iexact'
                else:
                    continue
                val = _clean_type_val(dct.pop(req))
                if val.startswith('"') and val.endswith('"'):
                    val = val[1:-1]
                reqs = Q(**{req: val})
                base_req = req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    # areas__parent__..., areas__parent__parent__..., etc.
                    req = req[:-(len(suffix))] + 'parent__' + suffix
                    q = Q(**{req: val})
                    reqs |= q
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-(len(suffix))] + 'children__' + suffix
                    q = Q(**{req: val})
                    reqs |= q
                and_reqs.append(reqs)
                continue
            # towns: same expansion but the reverse relation is "parents"
            if req.endswith('town__pk') or req.endswith('towns__pk') \
                    or req.endswith('town__cached_label__iexact') \
                    or req.endswith('towns__cached_label__iexact'):
                if req.endswith('pk'):
                    suffix = 'pk'
                elif req.endswith('cached_label__iexact'):
                    suffix = 'cached_label__iexact'
                else:
                    continue
                val = _clean_type_val(dct.pop(req)).strip('"')
                if val.startswith('"') and val.endswith('"'):
                    val = val[1:-1]
                reqs = Q(**{req: val})
                base_req = req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-(len(suffix))] + 'parents__' + suffix
                    q = Q(**{req: val})
                    reqs |= q
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-(len(suffix))] + 'children__' + suffix
                    q = Q(**{req: val})
                    reqs |= q
                and_reqs.append(reqs)
                continue
            # hierarchic general types (periods, units, ...)
            for k_hr in HIERARCHIC_FIELDS:
                if type(req) in (list, tuple):
                    # multiple alternate keys for the same value: OR them
                    val = dct.pop(req)
                    val = _clean_type_val(val)
                    q = None
                    for idx, r in enumerate(req):
                        r = _clean_type_val(r)
                        if not idx:
                            q = Q(**{r: val})
                        else:
                            q |= Q(**{r: val})
                    and_reqs.append(q)
                    break
                elif req.endswith(k_hr + '__pk') \
                        or req.endswith(k_hr + '__label__iexact'):
                    val = _clean_type_val(dct.pop(req))
                    if u";" in val:
                        # OR request
                        values = val.split(u";")
                    else:
                        values = [val]
                    base_req = req[:]
                    reqs = None
                    if req.endswith('pk'):
                        base_suffix = "pk"
                    elif req.endswith('label__iexact'):
                        base_suffix = "label__iexact"
                    else:
                        continue
                    for val in values:
                        suffix = base_suffix[:]
                        req = base_req[:]
                        # NOTE(review): duplicated startswith — the second
                        # test was probably meant to be endswith(u'"')
                        if val.startswith(u'"') and val.startswith(u'"'):
                            # manage search text by label
                            if u"*" in val:
                                suffix = "label__icontains"
                                val = val.replace(u'*', u"")
                            else:
                                suffix = "label__iexact"
                                val = val[1:-1]
                            req = req[:-(len(base_suffix))] + suffix
                        if not reqs:
                            reqs = Q(**{req: val})
                        else:
                            reqs |= Q(**{req: val})
                        for idx in range(HIERARCHIC_LEVELS):
                            req = req[:-(len(suffix))] + 'parent__' + suffix
                            q = Q(**{req: val})
                            reqs |= q
                    if reqs:
                        and_reqs.append(reqs)
                    break
def _manage_clean_search_field(dct):
for k in dct:
# clean quoted search field
if type(dct[k]) == unicode:
dct[k] = dct[k].replace(u'"', '')
dct[k] = _clean_type_val(dct[k])
if '*' in dct[k] and k.endswith('__iexact'):
value = dct.pop(k).replace(u'*', u'')
dct[k[:-len('__iexact')] + '__icontains'] = value
def _manage_relation_types(relation_types, dct, query, or_reqs):
    """
    Extend ``query`` to also match records related (via right_relations)
    to records satisfying the same criteria, for each requested relation
    type.

    :param relation_types: {key prefix: set of ";"-separated type labels}
    :param dct: current criteria dict
    :param query: current Q object
    :param or_reqs: (key, alternate criteria dict) pairs already collected
    :return: the extended Q object
    """
    for rtype_prefix in relation_types:
        vals = relation_types[rtype_prefix]
        if not vals:
            continue
        vals = list(vals)[0].split(';')
        for v in vals:
            alt_dct = {
                rtype_prefix + 'right_relations__relation_type__label__iexact':
                v.replace('"', '')}
            for k in dct:
                val = dct[k]
                if rtype_prefix:
                    # only get conditions related to the object
                    if rtype_prefix not in k:
                        continue
                    # tricky: reconstruct the key to make sense - remove the
                    # prefix from the key
                    k = k[0:k.index(rtype_prefix)] + \
                        k[k.index(rtype_prefix) + len(rtype_prefix):]
                if k.endswith('year'):
                    k += '__exact'
                # apply the same condition to the related (right) record
                alt_dct[rtype_prefix + 'right_relations__right_record__' + k] \
                    = val
            if not dct:
                # fake condition to trick Django (1.4): without it only the
                # alt_dct is managed
                query &= Q(pk__isnull=False)
            query |= Q(**alt_dct)
            # propagate the OR alternates to the related record too
            for k, or_req in or_reqs:
                altor_dct = alt_dct.copy()
                altor_dct.pop(k)
                for j in or_req:
                    val = or_req[j]
                    if j == 'year':
                        j = 'year__exact'
                    altor_dct[
                        rtype_prefix + 'right_relations__right_record__' + j] \
                        = val
                query |= Q(**altor_dct)
    return query
def _construct_query(relation_types, dct, or_reqs, and_reqs):
    """
    Build the final Q object from the criteria dict plus the OR, AND and
    relation-type extra requests.

    :param relation_types: relation-type criteria (see
        _manage_relation_types)
    :param dct: main criteria dict (modified in place)
    :param or_reqs: (key, alternate criteria dict) pairs OR-ed to the query
    :param and_reqs: Q objects AND-ed to the query
    :return: the resulting Q object
    """
    # manage multi value not already managed
    # consistency/py3-safety fix: iterate a snapshot of the keys — the
    # loop pops entries (the second loop below already did this)
    for key in list(dct.keys()):
        if type(dct[key]) == unicode and u";" in dct[key]:
            values = [v for v in dct[key].split(u';') if v]
            if not values:
                dct.pop(key)
                continue
            # first value stays in dct, the others become OR alternates
            dct[key] = values[0]
            if len(values) == 1:
                continue
            for v in values[1:]:
                or_reqs.append(
                    (key, {key: v})
                )
    # tuple keys: first target key stays, the others become OR alternates
    for k in list(dct.keys()):
        if type(k) not in (list, tuple):
            continue
        first_key = k[0]
        value = dct[k][:]
        dct.pop(k)
        dct[first_key] = value
        for other_key in k[1:]:
            or_reqs.append((first_key, {other_key: value}))
    query = Q(**dct)
    for k, or_req in or_reqs:
        # each alternate replaces one key of the base criteria
        alt_dct = dct.copy()
        alt_dct.pop(k)
        alt_dct.update(or_req)
        query |= Q(**alt_dct)
    query = _manage_relation_types(relation_types, dct, query, or_reqs)
    for and_req in and_reqs:
        query = query & and_req
    return query
def _manage_default_search(dct, request, model, default_name, my_base_request,
                           my_relative_session_names):
    """
    Compute default search criteria from the session when no explicit
    search has been submitted.

    Priority: pinned search string, then pinned item (or basket), then a
    parent item selected in the default menu (via UP_MODEL_QUERY).

    :return: (criteria dict, display label of the applied pinned search)
    """
    pinned_search = ""
    pin_key = "pin-search-" + default_name
    if pin_key in request.session and \
            request.session[pin_key]:  # a search is pinned
        pinned_search = request.session[pin_key]
        dct = {'search_vector': request.session[pin_key]}
    elif default_name in request.session and \
            request.session[default_name]:  # an item is pinned
        value = request.session[default_name]
        if 'basket-' in value:
            # the pinned "item" is a find basket
            try:
                dct = {"basket__pk":
                       request.session[default_name].split('-')[-1]}
                pinned_search = unicode(FindBasket.objects.get(
                    pk=dct["basket__pk"]))
            except FindBasket.DoesNotExist:
                pass
        else:
            try:
                dct = {"pk": request.session[default_name]}
                pinned_search = u'"{}"'.format(
                    model.objects.get(pk=dct["pk"])
                )
            except model.DoesNotExist:
                pass
    elif dct == (my_base_request or {}):
        # no criteria beyond the base request: try the parent item
        if not hasattr(model, 'UP_MODEL_QUERY'):
            logger.warning(
                "**WARN get_item**: - UP_MODEL_QUERY not defined for "
                "'{}'".format(model))
        else:
            # a parent item may be selected in the default menu
            for name, key in my_relative_session_names:
                if name in request.session and request.session[name] \
                        and 'basket-' not in request.session[name] \
                        and name in CURRENT_ITEM_KEYS_DICT:
                    up_model = CURRENT_ITEM_KEYS_DICT[name]
                    try:
                        dct.update({key: request.session[name]})
                        up_item = up_model.objects.get(pk=dct[key])
                        if up_item.SLUG not in model.UP_MODEL_QUERY:
                            logger.warning(
                                "**WARN get_item**: - {} not in "
                                "UP_MODEL_QUERY for {}'".format(
                                    up_item.SLUG,
                                    model))
                        else:
                            req_key, up_attr = model.UP_MODEL_QUERY[
                                up_item.SLUG]
                            pinned_search = u'{}="{}"'.format(
                                req_key,
                                getattr(up_item, up_attr)
                            )
                        break
                    except up_model.DoesNotExist:
                        pass
    return dct, pinned_search
def _format_val(val):
if val is None:
return u""
if type(val) == bool:
if val:
return unicode(_(u"True"))
else:
return unicode(_(u"False"))
if type(val) == str:
val = val.decode('utf-8')
return unicode(val)
# default page size for table results when no "length" is requested
DEFAULT_ROW_NUMBER = 10
# length is used by ajax DataTables requests
EXCLUDED_FIELDS = ['length']
def get_item(model, func_name, default_name, extra_request_keys=[],
base_request=None, bool_fields=[], reversed_bool_fields=[],
dated_fields=[], associated_models=[], relative_session_names=[],
specific_perms=[], own_table_cols=None, relation_types_prefix={},
do_not_deduplicate=False):
"""
Generic treatment of tables
:param model: model used for query
:param func_name: name of the function (used for session storage)
:param default_name: key used for default search in session
:param extra_request_keys: default query limitation
:param base_request:
:param bool_fields:
:param reversed_bool_fields:
:param dated_fields:
:param associated_models:
:param relative_session_names:
:param specific_perms:
:param own_table_cols:
:param relation_types_prefix:
:param do_not_deduplicate: duplication of id can occurs on large queryset a
mecanism of deduplication is used. But duplicate ids can be normal (for
instance for record_relations view).
:return:
"""
def func(request, data_type='json', full=False, force_own=False,
col_names=None, **dct):
available_perms = []
if specific_perms:
available_perms = specific_perms[:]
EMPTY = ''
if 'type' in dct:
data_type = dct.pop('type')
if not data_type:
EMPTY = '[]'
data_type = 'json'
allowed, own = check_model_access_control(request, model,
available_perms)
if not allowed:
return HttpResponse(EMPTY, content_type='text/plain')
if force_own:
own = True
if full == 'shortcut' and 'SHORTCUT_SEARCH' in request.session and \
request.session['SHORTCUT_SEARCH'] == 'own':
own = True
query_own = None
if own:
q = models.IshtarUser.objects.filter(user_ptr=request.user)
if not q.count():
return HttpResponse(EMPTY, content_type='text/plain')
query_own = model.get_query_owns(q.all()[0])
# get defaults from model
if not extra_request_keys and hasattr(model, 'EXTRA_REQUEST_KEYS'):
my_extra_request_keys = copy(model.EXTRA_REQUEST_KEYS)
else:
my_extra_request_keys = copy(extra_request_keys)
if base_request is None and hasattr(model, 'BASE_REQUEST'):
if callable(model.BASE_REQUEST):
my_base_request = model.BASE_REQUEST(request)
else:
my_base_request = copy(model.BASE_REQUEST)
elif base_request is not None:
my_base_request = copy(base_request)
else:
my_base_request = {}
if not bool_fields and hasattr(model, 'BOOL_FIELDS'):
my_bool_fields = model.BOOL_FIELDS[:]
else:
my_bool_fields = bool_fields[:]
if not reversed_bool_fields and hasattr(model, 'REVERSED_BOOL_FIELDS'):
my_reversed_bool_fields = model.REVERSED_BOOL_FIELDS[:]
else:
my_reversed_bool_fields = reversed_bool_fields[:]
if not dated_fields and hasattr(model, 'DATED_FIELDS'):
my_dated_fields = model.DATED_FIELDS[:]
else:
my_dated_fields = dated_fields[:]
if not associated_models and hasattr(model, 'ASSOCIATED_MODELS'):
my_associated_models = model.ASSOCIATED_MODELS[:]
else:
my_associated_models = associated_models[:]
if not relative_session_names and hasattr(model,
'RELATIVE_SESSION_NAMES'):
my_relative_session_names = model.RELATIVE_SESSION_NAMES[:]
else:
my_relative_session_names = relative_session_names[:]
if not relation_types_prefix and hasattr(model,
'RELATION_TYPES_PREFIX'):
my_relation_types_prefix = copy(model.RELATION_TYPES_PREFIX)
else:
my_relation_types_prefix = copy(relation_types_prefix)
fields = [model._meta.get_field(k)
for k in get_all_field_names(model)]
request_keys = dict([
(field.name,
field.name + (hasattr(field, 'rel') and field.rel and '__pk'
or ''))
for field in fields])
for associated_model, key in my_associated_models:
if type(associated_model) in (str, unicode):
if associated_model not in globals():
continue
associated_model = globals()[associated_model]
associated_fields = [
associated_model._meta.get_field(k)
for k in get_all_field_names(associated_model)]
request_keys.update(
dict([(key + "__" + field.name,
key + "__" + field.name +
(hasattr(field, 'rel') and field.rel and '__pk' or ''))
for field in associated_fields]))
request_keys.update(my_extra_request_keys)
if "query" in dct:
request_items = dct["query"]
request_items["submited"] = True
elif request.method == 'POST':
request_items = request.POST
else:
request_items = request.GET
count = dct.get('count', False)
# pager
try:
row_nb = int(request_items.get('length'))
except (ValueError, TypeError):
row_nb = DEFAULT_ROW_NUMBER
dct_request_items = {}
# filter requested fields
for k in request_items:
if k in EXCLUDED_FIELDS:
continue
key = k[:]
if key.startswith('searchprefix_'):
key = key[len('searchprefix_'):]
dct_request_items[key] = request_items[k]
request_items = dct_request_items
base_query = None
if isinstance(my_base_request, Q):
base_query = my_base_request
dct = {}
else:
dct = my_base_request
excluded_dct = {}
and_reqs, or_reqs = [], []
exc_and_reqs, exc_or_reqs = [], []
if full == 'shortcut':
if model.SLUG == "warehouse":
key = 'name__icontains'
else:
key = 'cached_label__icontains'
dct[key] = request.GET.get('term', None)
try:
old = 'old' in request_items and int(request_items['old'])
except ValueError:
return HttpResponse('[]', content_type='text/plain')
for k in request_keys:
val = request_items.get(k)
if not val:
continue
req_keys = request_keys[k]
if type(req_keys) not in (list, tuple):
dct[req_keys] = val
continue
# multiple choice target
reqs = Q(**{req_keys[0]: val})
for req_key in req_keys[1:]:
q = Q(**{req_key: val})
reqs |= q
and_reqs.append(reqs)
pinned_search = ""
# manage default and pinned search
if 'submited' not in request_items and full != 'shortcut':
if data_type == 'csv' and func_name in request.session:
dct = request.session[func_name]
else:
# default search
dct, pinned_search = _manage_default_search(
dct, request, model, default_name, my_base_request,
my_relative_session_names)
else:
request.session[func_name] = dct
dct['extras'] = []
dct, excluded_dct = _search_manage_search_vector(
model, dct, excluded_dct, request_keys)
search_vector = ""
if 'search_vector' in dct:
search_vector = dct.pop('search_vector')
# manage relations types
if 'relation_types' not in my_relation_types_prefix:
my_relation_types_prefix['relation_types'] = ''
relation_types = {}
for rtype_key in my_relation_types_prefix:
relation_types[my_relation_types_prefix[rtype_key]] = set()
for keys in list(dct.keys()):
if type(keys) not in (list, tuple):
keys = [keys]
for k in keys:
if k.startswith(rtype_key):
relation_types[my_relation_types_prefix[rtype_key]].add(
dct.pop(k)
)
_manage_bool_fields(model, my_bool_fields, my_reversed_bool_fields,
dct, or_reqs)
_manage_bool_fields(model, my_bool_fields, my_reversed_bool_fields,
excluded_dct, exc_or_reqs)
_manage_dated_fields(my_dated_fields, dct)
_manage_dated_fields(my_dated_fields, excluded_dct)
_manage_hierarchic_fields(dct, and_reqs)
_manage_hierarchic_fields(excluded_dct, exc_and_reqs)
_manage_facet_search(model, dct, and_reqs)
_manage_facet_search(model, excluded_dct, exc_and_reqs)
extras = dct.pop('extras')
_manage_clean_search_field(dct)
_manage_clean_search_field(excluded_dct)
query = _construct_query(relation_types, dct, or_reqs, and_reqs)
exc_query = None
if excluded_dct or exc_and_reqs or exc_or_reqs:
exc_query = _construct_query(
relation_types, excluded_dct, exc_or_reqs, exc_and_reqs)
if query_own:
query = query & query_own
# manage hierarchic in shortcut menu
if full == 'shortcut':
ASSOCIATED_ITEMS = {
Operation: (File, 'associated_file__pk'),
ContextRecord: (Operation, 'operation__pk'),
Find: (ContextRecord, 'base_finds__context_record__pk'),
}
if model in ASSOCIATED_ITEMS:
upper_model, upper_key = ASSOCIATED_ITEMS[model]
model_name = upper_model.SLUG
current = model_name in request.session \
and request.session[model_name]
if current:
dct = {upper_key: current}
query &= Q(**dct)
# print(query)
items = model.objects.filter(query)
if base_query:
items = items.filter(base_query)
if exc_query:
items = items.exclude(exc_query)
for extra in extras:
items = items.extra(**extra)
items = items.distinct()
if count:
return items.count()
# print(unicode(items.query).encode('utf-8'))
# Keep the search vector so it can be serialized with the results
# (dct here is the view's keyword dict from the enclosing closure).
if search_vector: # for serialization
dct['search_vector'] = search_vector
# table cols
# Determine the columns to display (table_cols) and the corresponding
# ORM lookup keys (query_table_cols). A caller-provided list wins;
# otherwise "full" mode exposes every non-private field while normal
# mode uses the configured/model-declared column list.
if own_table_cols:
table_cols = own_table_cols
else:
if full:
table_cols = [field.name for field in model._meta.fields
if field.name not in PRIVATE_FIELDS]
table_cols += [field.name for field in model._meta.many_to_many
if field.name not in PRIVATE_FIELDS]
if hasattr(model, 'EXTRA_FULL_FIELDS'):
table_cols += model.EXTRA_FULL_FIELDS
else:
# Site-level settings may override the model's default columns.
tb_key = (getattr(model, 'SLUG', None), 'TABLE_COLS')
if tb_key in settings.TABLE_COLS:
table_cols = settings.TABLE_COLS[tb_key]
else:
table_cols = model.TABLE_COLS
# A display column may pack several query keys separated by '|';
# flatten them into one lookup key list.
query_table_cols = []
for cols in table_cols:
if type(cols) not in (list, tuple):
cols = [cols]
for col in cols:
query_table_cols += col.split('|')
# contextual (full, simple, etc.) col
contxt = full and 'full' or 'simple'
# NOTE(review): query_table_cols is indexed with the table_cols index
# here; if any earlier column contained '|' the two lists are offset —
# verify this is intended.
if hasattr(model, 'CONTEXTUAL_TABLE_COLS') and \
contxt in model.CONTEXTUAL_TABLE_COLS:
for idx, col in enumerate(table_cols):
if col in model.CONTEXTUAL_TABLE_COLS[contxt]:
query_table_cols[idx] = \
model.CONTEXTUAL_TABLE_COLS[contxt][col]
# Shortcut menu only needs a single label column.
if full == 'shortcut':
if model.SLUG == "warehouse":
query_table_cols = ['name']
table_cols = ['name']
else:
query_table_cols = ['cached_label']
table_cols = ['cached_label']
# manage sort tables
# Parse sort directives from the request. The parameter shape
# ("order[N][dir]", "order[N][column]", numeric column indexes) looks
# like the DataTables server-side protocol — TODO confirm against the
# front-end code.
manual_sort_key = None
sorts = {}
for k in request_items:
if not k.startswith('order['):
continue
# Extract N from "order[N][...]" to group dir/column pairs.
num = int(k.split(']')[0][len("order["):])
if num not in sorts:
sorts[num] = ['', ''] # sign, col_num
if k.endswith('[dir]'):
order = request_items[k]
sign = order and order == u'desc' and "-" or ''
sorts[num][0] = sign
if k.endswith('[column]'):
sorts[num][1] = request_items[k]
sign = ""
# No explicit sort requested: fall back to the model's Meta.ordering.
if not sorts and model._meta.ordering:
orders = [k for k in model._meta.ordering]
items = items.order_by(*orders)
else:
orders = []
for idx in sorted(sorts.keys()):
signe, col_num = sorts[idx]
col_num = int(col_num)
# remove id and link col
if col_num < 2:
continue
# Map the visual column back to its ORM lookup key.
k = query_table_cols[col_num - 2]
if k in request_keys:
ks = request_keys[k]
if type(ks) not in (tuple, list):
ks = [ks]
for k in ks:
# Rewrite filter-style lookups into orderable field paths.
if k.endswith("__pk"):
k = k[:-len("__pk")] + "__label"
if k.endswith("towns"):
k = k + "__cached_label"
if k.endswith("__icontains") or \
k.endswith("__contains") or \
k.endswith("__iexact") or \
k.endswith("__exact"):
k = '__'.join(k.split('__')[:-1])
# if '__' in k:
# k = k.split('__')[0]
orders.append(signe + k)
else:
# not a standard request key
# Fall back to sorting in Python after extraction (see the
# manual_sort_key handling further down); only the first
# sort directive is honoured in that mode.
if idx: # not the first - we ignore this sort
continue
sign = signe
manual_sort_key = k
logger.warning(
"**WARN get_item - {}**: manual sort key '{}'".format(
func_name, k))
break
if not manual_sort_key:
items = items.order_by(*orders)
# pager management
# Slice the queryset to the requested page. NOTE(review): "start /
# row_nb" relies on Python 2 integer division; under Python 3 this
# would yield a float page number.
start, end = 0, None
page_nb = 1
if row_nb and data_type == "json":
try:
start = int(request_items.get('start'))
page_nb = start / row_nb + 1
assert page_nb >= 1
except (TypeError, ValueError, AssertionError):
# Missing or malformed "start" parameter: show page 1.
start = 0
page_nb = 1
end = page_nb * row_nb
# Shortcut menus are hard-capped to the first 20 items.
if full == 'shortcut':
start = 0
end = 20
items_nb = items.count()
if manual_sort_key:
# Python-side sort needs the full result set; slicing happens
# after sorting (see below).
items = items.all()
else:
items = items[start:end]
datas = []
# "old" requests historical versions of each item — presumably a
# django-reversion-like mechanism behind get_previous; verify upstream.
if old:
items = [item.get_previous(old) for item in items]
c_ids = []
for item in items:
# manual deduplicate when distinct is not enough
if not do_not_deduplicate and item.pk in c_ids:
continue
c_ids.append(item.pk)
# First cell of each row is always the pk.
data = [item.pk]
for keys in query_table_cols:
if type(keys) not in (list, tuple):
keys = [keys]
my_vals = []
for k in keys:
# Remap display keys to explicit request lookup paths when
# the model declares them.
if hasattr(model, 'EXTRA_REQUEST_KEYS') \
and k in model.EXTRA_REQUEST_KEYS:
k = model.EXTRA_REQUEST_KEYS[k]
if type(k) in (list, tuple):
k = k[0]
# Strip filter suffixes: we want the attribute itself.
for filtr in ('__icontains', '__contains', '__iexact',
'__exact'):
if k.endswith(filtr):
k = k[:len(k) - len(filtr)]
vals = [item]
# foreign key may be divided by "." or "__"
splitted_k = []
for ky in k.split('.'):
if '__' in ky:
splitted_k += ky.split('__')
else:
splitted_k.append(ky)
# Walk the attribute path, fanning out over related managers
# so a single row can yield several values per column.
for ky in splitted_k:
new_vals = []
for val in vals:
if hasattr(val, 'all'): # manage related objects
val = list(val.all())
for v in val:
v = getattr(v, ky)
new_vals += _get_values(request, v)
elif val:
try:
val = getattr(val, ky)
new_vals += _get_values(request, val)
except (AttributeError, GEOSException):
# must be a query key such as "contains"
pass
vals = new_vals
# manage last related objects
if vals and hasattr(vals[0], 'all'):
new_vals = []
for val in vals:
new_vals += list(val.all())
vals = new_vals
# Grouped keys ('|') are joined pairwise with " - ".
if not my_vals:
my_vals = [_format_val(va) for va in vals]
else:
new_vals = []
if not vals:
for idx, my_v in enumerate(my_vals):
new_vals.append(u"{}{}{}".format(
my_v, u' - ', ''))
else:
# NOTE(review): "vals[idx]" discards the previously
# accumulated my_vals prefix — my_vals[idx] looks
# intended; confirm against expected output.
for idx, v in enumerate(vals):
new_vals.append(u"{}{}{}".format(
vals[idx], u' - ', _format_val(v)))
my_vals = new_vals[:]
data.append(u" & ".join(my_vals) or u"")
datas.append(data)
# Sort in Python when the key could not be expressed as an ORM
# order_by (see manual_sort_key above), then apply the page slice.
if manual_sort_key:
# +1 because the id is added as a first col
idx_col = None
if manual_sort_key in query_table_cols:
idx_col = query_table_cols.index(manual_sort_key) + 1
else:
# The key may live inside a grouped (list/tuple) column.
for idx, col in enumerate(query_table_cols):
if type(col) in (list, tuple) and \
manual_sort_key in col:
idx_col = idx + 1
if idx_col is not None:
datas = sorted(datas, key=lambda x: x[idx_col])
# "sign" was set while parsing the sort directives above.
if sign == '-':
datas = reversed(datas)
datas = list(datas)[start:end]
# NOTE(review): both templates look emptied/garbled in this copy —
# link_template is two adjacent empty literals and link_ext_template
# is formatted with two arguments but has a single placeholder; they
# presumably held HTML anchor markup originally. Verify against VCS.
link_template = "" \
""
link_ext_template = '{}'
if data_type == "json":
rows = []
for data in datas:
# Build the "show" link for the row from the item pk.
try:
lnk = link_template % reverse('show-' + default_name,
args=[data[0], ''])
except NoReverseMatch:
logger.warning(
'**WARN "show-' + default_name + '" args ('
+ unicode(data[0]) + ") url not available")
lnk = ''
res = {'id': data[0], 'link': lnk}
for idx, value in enumerate(data[1:]):
if value:
table_col = table_cols[idx]
if type(table_col) not in (list, tuple):
table_col = [table_col]
tab_cols = []
# foreign key may be divided by "." or "__"
for tc in table_col:
if '.' in tc:
tab_cols += tc.split('.')
elif '__' in tc:
tab_cols += tc.split('__')
else:
tab_cols.append(tc)
# Normalized "__"-joined key used as the JSON field name.
k = "__".join(tab_cols)
if hasattr(model, 'COL_LINK') and k in model.COL_LINK:
value = link_ext_template.format(value, value)
res[k] = value
# Shortcut (autocomplete-style) consumers expect a "value" key.
if full == 'shortcut':
if 'cached_label' in res:
res['value'] = res.pop('cached_label')
elif 'name' in res:
res['value'] = res.pop('name')
rows.append(res)
if full == 'shortcut':
# Bare list for the shortcut widget.
data = json.dumps(rows)
else:
# Paged payload; field names match a DataTables-style consumer.
# NOTE(review): "items_nb / row_nb" is Python 2 integer division.
data = json.dumps({
"recordsTotal": items_nb,
"recordsFiltered": items_nb,
"rows": rows,
"pinned-search": pinned_search,
"page": page_nb,
"total": (items_nb / row_nb + 1) if row_nb else items_nb,
})
return HttpResponse(data, content_type='text/plain')
# CSV export branch: stream the same rows as an attachment, with all
# cells encoded to the site ENCODING.
elif data_type == "csv":
response = HttpResponse(content_type='text/csv')
n = datetime.datetime.now()
filename = u'%s_%s.csv' % (default_name,
n.strftime('%Y%m%d-%H%M%S'))
response['Content-Disposition'] = 'attachment; filename=%s' \
% filename
writer = csv.writer(response, **CSV_OPTIONS)
# Header row: caller-provided names win, otherwise derive them
# from model verbose_names / COL_LABELS.
if col_names:
col_names = [name.encode(ENCODING, errors='replace')
for name in col_names]
else:
col_names = []
for field_name in table_cols:
if type(field_name) in (list, tuple):
field_name = u" & ".join(field_name)
if hasattr(model, 'COL_LABELS') and \
field_name in model.COL_LABELS:
field = model.COL_LABELS[field_name]
col_names.append(unicode(field).encode(ENCODING))
continue
else:
# NOTE(review): bare "except:" — catches everything,
# including KeyboardInterrupt; FieldDoesNotExist is the
# expected failure here.
try:
field = model._meta.get_field(field_name)
except:
col_names.append(u"".encode(ENCODING))
logger.warning(
"**WARN get_item - csv export**: no col name "
"for {}\nadd explicit label to "
"COL_LABELS attribute of "
"{}".format(field_name, model))
continue
col_names.append(
unicode(field.verbose_name).encode(ENCODING))
writer.writerow(col_names)
for data in datas:
row, delta = [], 0
# regroup cols with join "|"
# delta tracks how many extra data cells were consumed by
# '|'-grouped columns so indexes stay aligned.
for idx, col_name in enumerate(table_cols):
if len(data[1:]) <= idx + delta:
break
val = data[1:][idx + delta].encode(
ENCODING, errors='replace')
if col_name and "|" in col_name[0]:
for delta_idx in range(
len(col_name[0].split('|')) - 1):
delta += 1
val += data[1:][idx + delta].encode(
ENCODING, errors='replace')
row.append(val)
writer.writerow(row)
return response
# Unknown data_type: empty JSON object.
return HttpResponse('{}', content_type='text/plain')
return func