Diffstat (limited to 'ishtar_common/views_item.py')
-rw-r--r--  ishtar_common/views_item.py  936
1 file changed, 936 insertions, 0 deletions
diff --git a/ishtar_common/views_item.py b/ishtar_common/views_item.py
new file mode 100644
index 000000000..eef3440bc
--- /dev/null
+++ b/ishtar_common/views_item.py
@@ -0,0 +1,936 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import csv
+import datetime
+import json
+import logging
+import optparse
+import re
+from copy import copy, deepcopy
+from tempfile import NamedTemporaryFile
+
+from django.conf import settings
+from django.contrib.postgres.search import SearchQuery
+from django.contrib.staticfiles.templatetags.staticfiles import static
+from django.core.exceptions import ObjectDoesNotExist
+from django.core.urlresolvers import reverse, NoReverseMatch
+from django.db.models import Q, ImageField
+from django.db.models.fields import FieldDoesNotExist
+from django.http import HttpResponse
+from django.shortcuts import render
+from django.template import loader
+from django.utils.translation import ugettext, ugettext_lazy as _
+from tidylib import tidy_document as tidy
+from unidecode import unidecode
+from weasyprint import HTML, CSS
+from weasyprint.fonts import FontConfiguration
+from xhtml2odt import xhtml2odt
+
+from ishtar_common.utils import check_model_access_control, CSV_OPTIONS, \
+ get_all_field_names
+from ishtar_common.models import HistoryError, get_current_profile, \
+ PRIVATE_FIELDS
+from menus import Menu
+
+import models
+from archaeological_files.models import File
+from archaeological_operations.models import Operation
+from archaeological_context_records.models import ContextRecord
+from archaeological_finds.models import Find, FindBasket, Treatment, \
+ TreatmentFile
+
+logger = logging.getLogger(__name__)
+
+ENCODING = settings.ENCODING or 'utf-8'
+
+CURRENT_ITEM_KEYS = (('file', File),
+ ('operation', Operation),
+ ('contextrecord', ContextRecord),
+ ('find', Find),
+ ('treatmentfile', TreatmentFile),
+ ('treatment', Treatment))
+CURRENT_ITEM_KEYS_DICT = dict(CURRENT_ITEM_KEYS)
+
+
+def check_permission(request, action_slug, obj_id=None):
+ MAIN_MENU = Menu(None)
+ MAIN_MENU.init()
+ if action_slug not in MAIN_MENU.items:
+ # TODO
+ return True
+ if obj_id:
+ return MAIN_MENU.items[action_slug].is_available(
+ request.user, obj_id, session=request.session)
+ return MAIN_MENU.items[action_slug].can_be_available(
+ request.user, session=request.session)
+
+
+def new_item(model, frm, many=False):
+ def func(request, parent_name, limits=''):
+ model_name = model._meta.object_name
+ if not check_permission(request, 'add_' + model_name.lower()):
+ not_permitted_msg = ugettext(u"Operation not permitted.")
+ return HttpResponse(not_permitted_msg)
+ dct = {'title': unicode(_(u'New %s' % model_name.lower())),
+ 'many': many}
+ if request.method == 'POST':
+ dct['form'] = frm(request.POST, limits=limits)
+ if dct['form'].is_valid():
+ new_item = dct['form'].save(request.user)
+ dct['new_item_label'] = unicode(new_item)
+ dct['new_item_pk'] = new_item.pk
+ dct['parent_name'] = parent_name
+ dct['parent_pk'] = parent_name
+ if dct['parent_pk'] and '_select_' in dct['parent_pk']:
+ parents = dct['parent_pk'].split('_')
+ dct['parent_pk'] = "_".join([parents[0]] + parents[2:])
+ return render(request, 'window.html', dct)
+ else:
+ dct['form'] = frm(limits=limits)
+ return render(request, 'window.html', dct)
+ return func
+
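+# Hypothetical wiring sketch (names are illustrative, not taken from this
+# module): new_item() builds a popup view that checks the "add_<model>"
+# menu permission before displaying the creation form, e.g.
+#
+#     new_operation = new_item(Operation, OperationForm)
+#     url(r'^new-operation/(?P<parent_name>[^/]*)/$', new_operation,
+#         name='new-operation')
+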
+
+def display_item(model, extra_dct=None, show_url=None):
+ def func(request, pk, **dct):
+ if show_url:
+ dct['show_url'] = "/{}{}/".format(show_url, pk)
+ else:
+ dct['show_url'] = "/show-{}/{}/".format(model.SLUG, pk)
+ return render(request, 'ishtar/display_item.html', dct)
+ return func
+
+
+def show_item(model, name, extra_dct=None):
+ def func(request, pk, **dct):
+ allowed, own = check_model_access_control(request, model)
+ if not allowed:
+ return HttpResponse('', content_type="application/xhtml")
+ q = model.objects
+ if own:
+ query_own = model.get_query_owns(request.user)
+ if query_own:
+ q = q.filter(query_own)
+ try:
+ item = q.get(pk=pk)
+ except ObjectDoesNotExist:
+ return HttpResponse('NOK')
+ doc_type = 'type' in dct and dct.pop('type')
+ url_name = u"/".join(reverse('show-' + name, args=['0', '']
+ ).split('/')[:-2]) + u"/"
+ dct['CURRENCY'] = get_current_profile().currency
+ dct['ENCODING'] = settings.ENCODING
+ dct['DOT_GENERATION'] = settings.DOT_BINARY and True
+ dct['current_window_url'] = url_name
+ date = None
+ if 'date' in dct:
+ date = dct.pop('date')
+ dct['sheet_id'] = "%s-%d" % (name, item.pk)
+ dct['window_id'] = "%s-%d-%s" % (
+ name, item.pk, datetime.datetime.now().strftime('%M%s'))
+ if hasattr(item, 'history'):
+ if date:
+ try:
+ date = datetime.datetime.strptime(date,
+ '%Y-%m-%dT%H:%M:%S.%f')
+ item = item.get_previous(date=date)
+ assert item is not None
+ except (ValueError, AssertionError):
+ return HttpResponse(None, content_type='text/plain')
+ dct['previous'] = item._previous
+ dct['next'] = item._next
+ else:
+ historized = item.history.all()
+ if historized:
+ item.history_date = historized[0].history_date
+ if len(historized) > 1:
+ dct['previous'] = historized[1].history_date
+ dct['item'], dct['item_name'] = item, name
+ # add context
+ if extra_dct:
+ dct.update(extra_dct(request, item))
+ context_instance = deepcopy(dct)
+ context_instance['output'] = 'html'
+ if hasattr(item, 'history_object'):
+ filename = item.history_object.associated_filename
+ else:
+ filename = item.associated_filename
+ if doc_type == "odt" and settings.ODT_TEMPLATE:
+ tpl = loader.get_template('ishtar/sheet_%s.html' % name)
+ context_instance['output'] = 'ODT'
+ content = tpl.render(context_instance, request)
+ try:
+ tidy_options = {'output-xhtml': 1, 'indent': 1,
+ 'tidy-mark': 0, 'doctype': 'auto',
+ 'add-xml-decl': 1, 'wrap': 1}
+ html, errors = tidy(content, options=tidy_options)
+ html = html.encode('utf-8').replace(" ", " ")
+ html = re.sub('<pre([^>]*)>\n', '<pre\\1>', html)
+
+ odt = NamedTemporaryFile()
+ options = optparse.Values()
+ options.with_network = True
+ for k, v in (('input', ''),
+ ('output', odt.name),
+ ('template', settings.ODT_TEMPLATE),
+ ('with_network', True),
+ ('top_header_level', 1),
+ ('img_width', '8cm'),
+ ('img_height', '6cm'),
+ ('verbose', False),
+ ('replace_keyword', 'ODT-INSERT'),
+ ('cut_start', 'ODT-CUT-START'),
+ ('htmlid', None),
+ ('url', "#")):
+ setattr(options, k, v)
+ odtfile = xhtml2odt.ODTFile(options)
+ odtfile.open()
+ odtfile.import_xhtml(html)
+ odtfile = odtfile.save()
+ except xhtml2odt.ODTExportError:
+ return HttpResponse(content, content_type="application/xhtml")
+ response = HttpResponse(
+ content_type='application/vnd.oasis.opendocument.text')
+ response['Content-Disposition'] = 'attachment; filename=%s.odt' % \
+ filename
+ response.write(odtfile)
+ return response
+ elif doc_type == 'pdf':
+ tpl = loader.get_template('ishtar/sheet_%s_pdf.html' % name)
+ context_instance['output'] = 'PDF'
+ html = tpl.render(context_instance, request)
+ font_config = FontConfiguration()
+ css = CSS(string='''
+ @font-face {
+ font-family: Gentium;
+ src: url(%s);
+ }
+ body{
+ font-family: Gentium
+ }
+ ''' % (static("gentium/GentiumPlus-R.ttf")))
+ css2 = CSS(filename=settings.STATIC_ROOT + '/media/style_basic.css')
+ pdf = HTML(string=html, base_url=request.build_absolute_uri()
+ ).write_pdf(stylesheets=[css, css2],
+ font_config=font_config)
+ response = HttpResponse(pdf, content_type='application/pdf')
+ response['Content-Disposition'] = 'attachment; filename=%s.pdf' % \
+ filename
+ return response
+ else:
+ tpl = loader.get_template('ishtar/sheet_%s_window.html' % name)
+ content = tpl.render(context_instance, request)
+ return HttpResponse(content, content_type="application/xhtml")
+ return func
+
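+# Hypothetical wiring sketch (names are illustrative): show_item() builds the
+# item sheet view; the same callable also renders ODT and PDF exports when a
+# "type" argument is passed by the URL configuration, e.g.
+#
+#     show_operation = show_item(Operation, 'operation')
+#     url(r'^show-operation(?:/(?P<pk>.+))?/(?P<type>.+)?$', show_operation,
+#         name='show-operation')
+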
+
+def revert_item(model):
+ def func(request, pk, date, **dct):
+ try:
+ item = model.objects.get(pk=pk)
+ date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f')
+ item.rollback(date)
+ except (ObjectDoesNotExist, ValueError, HistoryError):
+ return HttpResponse(None, content_type='text/plain')
+ return HttpResponse("True", content_type='text/plain')
+ return func
+
+
+HIERARCHIC_LEVELS = 5
+HIERARCHIC_FIELDS = ['periods', 'period', 'unit', 'material_types',
+ 'material_type', 'conservatory_state', 'object_types']
+
+
+def _get_values(request, val):
+ if hasattr(val, 'all'): # manage related objects
+ vals = list(val.all())
+ else:
+ vals = [val]
+ new_vals = []
+ for v in vals:
+ if callable(v):
+ v = v()
+ if hasattr(v, 'url'):
+            protocol = 'https' if request.is_secure() else 'http'
+            v = protocol + '://' + request.get_host() + v.url
+ new_vals.append(v)
+ return new_vals
+
+
+def _search_manage_search_vector(dct):
+ if 'search_vector' in dct:
+ dct['search_vector'] = SearchQuery(
+ unidecode(dct['search_vector']),
+ config=settings.ISHTAR_SEARCH_LANGUAGE
+ )
+ return dct
+
+
+def _format_val(val):
+ if val is None:
+ return u""
+ if type(val) == bool:
+ if val:
+ return unicode(_(u"True"))
+ else:
+ return unicode(_(u"False"))
+ if type(val) == str:
+ val = val.decode('utf-8')
+ return unicode(val)
+
+
+DEFAULT_ROW_NUMBER = 10
+# length is used by ajax DataTables requests
+EXCLUDED_FIELDS = ['length']
+
+
+def get_item(model, func_name, default_name, extra_request_keys=[],
+ base_request=None, bool_fields=[], reversed_bool_fields=[],
+ dated_fields=[], associated_models=[], relative_session_names=[],
+ specific_perms=[], own_table_cols=None, relation_types_prefix={},
+ do_not_deduplicate=False):
+ """
+ Generic treatment of tables
+
+ :param model: model used for query
+ :param func_name: name of the function (used for session storage)
+ :param default_name: key used for default search in session
+ :param extra_request_keys: default query limitation
+ :param base_request:
+ :param bool_fields:
+ :param reversed_bool_fields:
+ :param dated_fields:
+ :param associated_models:
+ :param relative_session_names:
+ :param specific_perms:
+ :param own_table_cols:
+ :param relation_types_prefix:
+ :param do_not_deduplicate: duplication of id can occurs on large queryset a
+ mecanism of deduplication is used. But duplicate ids can be normal (for
+ instance for record_relations view).
+ :return:
+ """
+ def func(request, data_type='json', full=False, force_own=False,
+ col_names=None, **dct):
+ available_perms = []
+ if specific_perms:
+ available_perms = specific_perms[:]
+ EMPTY = ''
+ if 'type' in dct:
+ data_type = dct.pop('type')
+ if not data_type:
+ EMPTY = '[]'
+ data_type = 'json'
+
+ allowed, own = check_model_access_control(request, model,
+ available_perms)
+ if not allowed:
+ return HttpResponse(EMPTY, content_type='text/plain')
+
+ if force_own:
+ own = True
+ if full == 'shortcut' and 'SHORTCUT_SEARCH' in request.session and \
+ request.session['SHORTCUT_SEARCH'] == 'own':
+ own = True
+
+ # get defaults from model
+ if not extra_request_keys and hasattr(model, 'EXTRA_REQUEST_KEYS'):
+ my_extra_request_keys = copy(model.EXTRA_REQUEST_KEYS)
+ else:
+ my_extra_request_keys = copy(extra_request_keys)
+ if base_request is None and hasattr(model, 'BASE_REQUEST'):
+ my_base_request = copy(model.BASE_REQUEST)
+ elif base_request is not None:
+ my_base_request = copy(base_request)
+ else:
+ my_base_request = {}
+ if not bool_fields and hasattr(model, 'BOOL_FIELDS'):
+ my_bool_fields = model.BOOL_FIELDS[:]
+ else:
+ my_bool_fields = bool_fields[:]
+ if not reversed_bool_fields and hasattr(model, 'REVERSED_BOOL_FIELDS'):
+ my_reversed_bool_fields = model.REVERSED_BOOL_FIELDS[:]
+ else:
+ my_reversed_bool_fields = reversed_bool_fields[:]
+ if not dated_fields and hasattr(model, 'DATED_FIELDS'):
+ my_dated_fields = model.DATED_FIELDS[:]
+ else:
+ my_dated_fields = dated_fields[:]
+ if not associated_models and hasattr(model, 'ASSOCIATED_MODELS'):
+ my_associated_models = model.ASSOCIATED_MODELS[:]
+ else:
+ my_associated_models = associated_models[:]
+ if not relative_session_names and hasattr(model,
+ 'RELATIVE_SESSION_NAMES'):
+ my_relative_session_names = model.RELATIVE_SESSION_NAMES[:]
+ else:
+ my_relative_session_names = relative_session_names[:]
+ if not relation_types_prefix and hasattr(model,
+ 'RELATION_TYPES_PREFIX'):
+ my_relation_types_prefix = copy(model.RELATION_TYPES_PREFIX)
+ else:
+ my_relation_types_prefix = copy(relation_types_prefix)
+
+ fields = [model._meta.get_field(k)
+ for k in get_all_field_names(model)]
+
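+        # build the mapping from request parameter names to ORM lookups:
+        # model fields first, then fields of associated models, then the
+        # explicit extra request keys (which override the generated ones)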
+ request_keys = dict([
+ (field.name,
+ field.name + (hasattr(field, 'rel') and field.rel and '__pk'
+ or ''))
+ for field in fields])
+ for associated_model, key in my_associated_models:
+ if type(associated_model) in (str, unicode):
+ if associated_model not in globals():
+ continue
+ associated_model = globals()[associated_model]
+ associated_fields = [
+ associated_model._meta.get_field(k)
+ for k in get_all_field_names(associated_model)]
+ request_keys.update(
+ dict([(key + "__" + field.name,
+ key + "__" + field.name +
+ (hasattr(field, 'rel') and field.rel and '__pk' or ''))
+ for field in associated_fields]))
+ request_keys.update(my_extra_request_keys)
+ request_items = request.method == 'POST' and request.POST \
+ or request.GET
+
+ # pager
+ try:
+ row_nb = int(request_items.get('length'))
+ except (ValueError, TypeError):
+ row_nb = DEFAULT_ROW_NUMBER
+ dct_request_items = {}
+
+ # filter requested fields
+ for k in request_items:
+ if k in EXCLUDED_FIELDS:
+ continue
+ key = k[:]
+ if key.startswith('searchprefix_'):
+ key = key[len('searchprefix_'):]
+ dct_request_items[key] = request_items[k]
+ request_items = dct_request_items
+
+ dct = my_base_request
+ if full == 'shortcut':
+ dct['cached_label__icontains'] = request.GET.get('term', None)
+ and_reqs, or_reqs = [], []
+ try:
+ old = 'old' in request_items and int(request_items['old'])
+ except ValueError:
+ return HttpResponse('[]', content_type='text/plain')
+
+ # manage relations types
+ if 'relation_types' not in my_relation_types_prefix:
+ my_relation_types_prefix['relation_types'] = ''
+ relation_types = {}
+ for rtype_key in my_relation_types_prefix:
+ relation_types[my_relation_types_prefix[rtype_key]] = set()
+ for k in request_items:
+ if k.startswith(rtype_key):
+ relation_types[my_relation_types_prefix[rtype_key]].add(
+ request_items[k])
+ continue
+
+ for k in request_keys:
+ val = request_items.get(k)
+ if not val:
+ continue
+ req_keys = request_keys[k]
+ if type(req_keys) not in (list, tuple):
+ dct[req_keys] = val
+ continue
+ # multiple choice target
+ reqs = Q(**{req_keys[0]: val})
+ for req_key in req_keys[1:]:
+ q = Q(**{req_key: val})
+ reqs |= q
+ and_reqs.append(reqs)
+
+ pinned_search = ""
+ if 'submited' not in request_items and full != 'shortcut':
+ # default search
+ # an item is selected in the default menu
+ if default_name in request.session and \
+ request.session[default_name]:
+ value = request.session[default_name]
+ if 'basket-' in value:
+ try:
+ dct = {"basket__pk":
+ request.session[default_name].split('-')[-1]}
+ pinned_search = unicode(FindBasket.objects.get(
+ pk=dct["basket__pk"]))
+ except FindBasket.DoesNotExist:
+ pass
+ else:
+ try:
+ dct = {"pk": request.session[default_name]}
+ pinned_search = unicode(model._meta.verbose_name) \
+ + u" - " + unicode(
+ model.objects.get(pk=dct["pk"]))
+ except model.DoesNotExist:
+ pass
+ elif dct == (my_base_request or {}):
+ # a parent item may be selected in the default menu
+ for name, key in my_relative_session_names:
+ if name in request.session and request.session[name] \
+ and 'basket-' not in request.session[name] \
+ and name in CURRENT_ITEM_KEYS_DICT:
+ up_model = CURRENT_ITEM_KEYS_DICT[name]
+ try:
+ dct.update({key: request.session[name]})
+ pinned_search = unicode(up_model._meta.verbose_name) \
+ + u" - " + unicode(
+ up_model.objects.get(pk=dct[key]))
+ break
+ except up_model.DoesNotExist:
+ pass
+ if (not dct or data_type == 'csv') \
+ and func_name in request.session:
+ dct = request.session[func_name]
+ else:
+ request.session[func_name] = dct
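+        # boolean criteria are posted as "1" (indifferent - dropped), "2"
+        # (yes) or anything else (no); reversed fields are then negated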
+ for k in (list(my_bool_fields) + list(my_reversed_bool_fields)):
+ if k in dct:
+ if dct[k] == u"1":
+ dct.pop(k)
+ else:
+ dct[k] = dct[k] == u"2" and True or False
+ if k in my_reversed_bool_fields:
+ dct[k] = not dct[k]
+ # check also for empty value with image field
+ field_name = k.split('__')[0]
+ # TODO: can be improved in later version of Django
+ try:
+ c_field = model._meta.get_field(field_name)
+ if k.endswith('__isnull') and \
+ isinstance(c_field, ImageField):
+ if dct[k]:
+ or_reqs.append(
+ (k, {k.split('__')[0] + '__exact': ''}))
+ else:
+ dct[k.split('__')[0] + '__regex'] = '.{1}.*'
+ except FieldDoesNotExist:
+ pass
+ for k in my_dated_fields:
+ if k in dct:
+                if not dct[k]:
+                    dct.pop(k)
+                    continue
+ try:
+ items = dct[k].split('/')
+ assert len(items) == 3
+ dct[k] = datetime.date(*map(lambda x: int(x),
+ reversed(items))) \
+ .strftime('%Y-%m-%d')
+                except (AssertionError, ValueError):
+ dct.pop(k)
+        # manage hierarchic conditions
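+        # a criterion on a town also matches its parents and children up to
+        # HIERARCHIC_LEVELS levels; other hierarchical fields (periods,
+        # material types, ...) also match their parents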
+ for req in dct.copy():
+ if req.endswith('town__pk') or req.endswith('towns__pk'):
+ val = dct.pop(req)
+ reqs = Q(**{req: val})
+ base_req = req[:-2] + '__'
+ req = base_req[:]
+ for idx in range(HIERARCHIC_LEVELS):
+ req = req[:-2] + 'parents__pk'
+ q = Q(**{req: val})
+ reqs |= q
+ req = base_req[:]
+ for idx in range(HIERARCHIC_LEVELS):
+ req = req[:-2] + 'children__pk'
+ q = Q(**{req: val})
+ reqs |= q
+ and_reqs.append(reqs)
+ continue
+
+ for k_hr in HIERARCHIC_FIELDS:
+ if type(req) in (list, tuple):
+ val = dct.pop(req)
+ q = None
+ for idx, r in enumerate(req):
+ if not idx:
+ q = Q(**{r: val})
+ else:
+ q |= Q(**{r: val})
+ and_reqs.append(q)
+ break
+ elif req.endswith(k_hr + '__pk'):
+ val = dct.pop(req)
+ reqs = Q(**{req: val})
+ req = req[:-2] + '__'
+ for idx in range(HIERARCHIC_LEVELS):
+ req = req[:-2] + 'parent__pk'
+ q = Q(**{req: val})
+ reqs |= q
+ and_reqs.append(reqs)
+ break
+ dct = _search_manage_search_vector(dct)
+ query = Q(**dct)
+ for k, or_req in or_reqs:
+ alt_dct = dct.copy()
+ alt_dct.pop(k)
+ alt_dct.update(or_req)
+ query |= Q(**alt_dct)
+
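+        # for each selected relation type, extend the query so that the same
+        # criteria also match related records through
+        # right_relations__right_record__<key>, OR-ed with the main query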
+ for rtype_prefix in relation_types:
+ vals = list(relation_types[rtype_prefix])
+ if not vals:
+ continue
+ alt_dct = {
+ rtype_prefix + 'right_relations__relation_type__pk__in': vals}
+ for k in dct:
+ val = dct[k]
+ if rtype_prefix:
+ # only get conditions related to the object
+ if rtype_prefix not in k:
+ continue
+ # tricky: reconstruct the key to make sense - remove the
+ # prefix from the key
+ k = k[0:k.index(rtype_prefix)] + k[
+ k.index(rtype_prefix) + len(rtype_prefix):]
+ if k.endswith('year'):
+ k += '__exact'
+ alt_dct[rtype_prefix + 'right_relations__right_record__' + k] = \
+ val
+ if not dct:
+ # fake condition to trick Django (1.4): without it only the
+ # alt_dct is managed
+ query &= Q(pk__isnull=False)
+ query |= Q(**alt_dct)
+ for k, or_req in or_reqs:
+ altor_dct = alt_dct.copy()
+ altor_dct.pop(k)
+ for j in or_req:
+ val = or_req[j]
+ if j == 'year':
+ j = 'year__exact'
+ altor_dct[
+ rtype_prefix + 'right_relations__right_record__' + j] = \
+ val
+ query |= Q(**altor_dct)
+
+ if own:
+ q = models.IshtarUser.objects.filter(user_ptr=request.user)
+ if q.count():
+ query = query & model.get_query_owns(q.all()[0])
+ else:
+ return HttpResponse(EMPTY, content_type='text/plain')
+
+ for and_req in and_reqs:
+ query = query & and_req
+
+ # manage hierarchic in shortcut menu
+ if full == 'shortcut':
+ ASSOCIATED_ITEMS = {
+ Operation: (File, 'associated_file__pk'),
+ ContextRecord: (Operation, 'operation__pk'),
+ Find: (ContextRecord, 'base_finds__context_record__pk'),
+ }
+ if model in ASSOCIATED_ITEMS:
+ upper_model, upper_key = ASSOCIATED_ITEMS[model]
+ model_name = upper_model.SLUG
+ current = model_name in request.session \
+ and request.session[model_name]
+ if current:
+ dct = {upper_key: current}
+ query &= Q(**dct)
+
+ items = model.objects.filter(query).distinct()
+ # print(items.query)
+
+ if 'search_vector' in dct: # for serialization
+ dct['search_vector'] = dct['search_vector'].value
+
+ # table cols
+ if own_table_cols:
+ table_cols = own_table_cols
+ else:
+ if full:
+ table_cols = [field.name for field in model._meta.fields
+ if field.name not in PRIVATE_FIELDS]
+ table_cols += [field.name for field in model._meta.many_to_many
+ if field.name not in PRIVATE_FIELDS]
+ if hasattr(model, 'EXTRA_FULL_FIELDS'):
+ table_cols += model.EXTRA_FULL_FIELDS
+ else:
+ table_cols = model.TABLE_COLS
+ query_table_cols = []
+ for cols in table_cols:
+ if type(cols) not in (list, tuple):
+ cols = [cols]
+ for col in cols:
+ query_table_cols += col.split('|')
+
+ # contextual (full, simple, etc.) col
+ contxt = full and 'full' or 'simple'
+ if hasattr(model, 'CONTEXTUAL_TABLE_COLS') and \
+ contxt in model.CONTEXTUAL_TABLE_COLS:
+ for idx, col in enumerate(table_cols):
+ if col in model.CONTEXTUAL_TABLE_COLS[contxt]:
+ query_table_cols[idx] = \
+ model.CONTEXTUAL_TABLE_COLS[contxt][col]
+ if full == 'shortcut':
+ query_table_cols = ['cached_label']
+ table_cols = ['cached_label']
+
+ # manage sort tables
+ manual_sort_key = None
+
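+        # DataTables posts its sort criteria as order[<idx>][column] and
+        # order[<idx>][dir] parameters; collect them by index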
+ sorts = {}
+ for k in request_items:
+ if not k.startswith('order['):
+ continue
+ num = int(k.split(']')[0][len("order["):])
+ if num not in sorts:
+ sorts[num] = ['', ''] # sign, col_num
+ if k.endswith('[dir]'):
+ order = request_items[k]
+ sign = order and order == u'desc' and "-" or ''
+ sorts[num][0] = sign
+ if k.endswith('[column]'):
+ sorts[num][1] = request_items[k]
+ sign = ""
+ if not sorts and model._meta.ordering:
+ orders = [k for k in model._meta.ordering]
+ items = items.order_by(*orders)
+ else:
+ orders = []
+ for idx in sorted(sorts.keys()):
+ signe, col_num = sorts[idx]
+ k = query_table_cols[int(col_num) - 2] # remove id and link col
+ if k in request_keys:
+ ks = request_keys[k]
+ if type(ks) not in (tuple, list):
+ ks = [ks]
+ for k in ks:
+ if k.endswith("__pk"):
+ k = k[:-len("__pk")] + "__label"
+ if '__' in k:
+ k = k.split('__')[0]
+ orders.append(signe + k)
+ else:
+ # not a standard request key
+ if idx: # not the first - we ignore this sort
+ continue
+ sign = signe
+ manual_sort_key = k
+ logger.warning(
+ "**WARN get_item - {}**: manual sort key '{}'".format(
+ func_name, k))
+ break
+ if not manual_sort_key:
+ items = items.order_by(*orders)
+
+ # pager management
+ start, end = 0, None
+ page_nb = 1
+ if row_nb and data_type == "json":
+ try:
+ start = int(request_items.get('start'))
+ page_nb = start / row_nb + 1
+ assert page_nb >= 1
+ except (TypeError, ValueError, AssertionError):
+ start = 0
+ page_nb = 1
+ end = page_nb * row_nb
+ if full == 'shortcut':
+ start = 0
+ end = 20
+
+ items_nb = items.count()
+ if manual_sort_key:
+ items = items.all()
+ else:
+ items = items[start:end]
+
+ datas = []
+ if old:
+ items = [item.get_previous(old) for item in items]
+ c_ids = []
+ for item in items:
+            # manually deduplicate when distinct is not enough
+ if not do_not_deduplicate and item.pk in c_ids:
+ continue
+ c_ids.append(item.pk)
+ data = [item.pk]
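+            # resolve each column: follow the lookup path through foreign
+            # keys and related managers, then join multiple values with " & "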
+ for keys in query_table_cols:
+ if type(keys) not in (list, tuple):
+ keys = [keys]
+ my_vals = []
+ for k in keys:
+ if hasattr(model, 'EXTRA_REQUEST_KEYS') \
+ and k in model.EXTRA_REQUEST_KEYS:
+ k = model.EXTRA_REQUEST_KEYS[k]
+ if type(k) in (list, tuple):
+ k = k[0]
+ for filtr in ('__icontains', '__contains'):
+ if k.endswith(filtr):
+ k = k[:len(k) - len(filtr)]
+ vals = [item]
+ # foreign key may be divided by "." or "__"
+ splitted_k = []
+ for ky in k.split('.'):
+ if '__' in ky:
+ splitted_k += ky.split('__')
+ else:
+ splitted_k.append(ky)
+ for ky in splitted_k:
+ new_vals = []
+ for val in vals:
+ if hasattr(val, 'all'): # manage related objects
+ val = list(val.all())
+ for v in val:
+ v = getattr(v, ky)
+ new_vals += _get_values(request, v)
+ elif val:
+ try:
+ val = getattr(val, ky)
+ new_vals += _get_values(request, val)
+ except AttributeError:
+ # must be a query key such as "contains"
+ pass
+ vals = new_vals
+ # manage last related objects
+ if vals and hasattr(vals[0], 'all'):
+ new_vals = []
+ for val in vals:
+ new_vals += list(val.all())
+ vals = new_vals
+ if not my_vals:
+ my_vals = [_format_val(va) for va in vals]
+ else:
+ new_vals = []
+ if not vals:
+ for idx, my_v in enumerate(my_vals):
+ new_vals.append(u"{}{}{}".format(
+ my_v, u' - ', ''))
+ else:
+ for idx, v in enumerate(vals):
+ new_vals.append(u"{}{}{}".format(
+ vals[idx], u' - ', _format_val(v)))
+ my_vals = new_vals[:]
+ data.append(u" & ".join(my_vals) or u"")
+ datas.append(data)
+ if manual_sort_key:
+ # +1 because the id is added as a first col
+ idx_col = None
+ if manual_sort_key in query_table_cols:
+ idx_col = query_table_cols.index(manual_sort_key) + 1
+ else:
+ for idx, col in enumerate(query_table_cols):
+ if type(col) in (list, tuple) and \
+ manual_sort_key in col:
+ idx_col = idx + 1
+ if idx_col is not None:
+ datas = sorted(datas, key=lambda x: x[idx_col])
+ if sign == '-':
+ datas = reversed(datas)
+ datas = list(datas)[start:end]
+ link_template = "<a class='display_details' href='#' " \
+ "onclick='load_window(\"%s\")'>" \
+ "<i class=\"fa fa-info-circle\" aria-hidden=\"true\"></i></a>"
+ link_ext_template = '<a href="{}" target="_blank">{}</a>'
+ if data_type == "json":
+ rows = []
+ for data in datas:
+ try:
+ lnk = link_template % reverse('show-' + default_name,
+ args=[data[0], ''])
+ except NoReverseMatch:
+ logger.warning(
+ '**WARN "show-' + default_name + '" args ('
+ + unicode(data[0]) + ") url not available")
+ lnk = ''
+ res = {'id': data[0], 'link': lnk}
+ for idx, value in enumerate(data[1:]):
+ if value:
+ table_col = table_cols[idx]
+ if type(table_col) not in (list, tuple):
+ table_col = [table_col]
+ tab_cols = []
+ # foreign key may be divided by "." or "__"
+ for tc in table_col:
+ if '.' in tc:
+ tab_cols += tc.split('.')
+ elif '__' in tc:
+ tab_cols += tc.split('__')
+ else:
+ tab_cols.append(tc)
+ k = "__".join(tab_cols)
+ if hasattr(model, 'COL_LINK') and k in model.COL_LINK:
+ value = link_ext_template.format(value, value)
+ res[k] = value
+ if full == 'shortcut' and 'cached_label' in res:
+ res['value'] = res.pop('cached_label')
+ rows.append(res)
+ if full == 'shortcut':
+ data = json.dumps(rows)
+ else:
+ data = json.dumps({
+ "recordsTotal": items_nb,
+ "recordsFiltered": items_nb,
+ "rows": rows,
+ "pinned-search": pinned_search,
+ "page": page_nb,
+ "total": (items_nb / row_nb + 1) if row_nb else items_nb,
+ })
+ return HttpResponse(data, content_type='text/plain')
+ elif data_type == "csv":
+ response = HttpResponse(content_type='text/csv')
+ n = datetime.datetime.now()
+ filename = u'%s_%s.csv' % (default_name,
+ n.strftime('%Y%m%d-%H%M%S'))
+ response['Content-Disposition'] = 'attachment; filename=%s' \
+ % filename
+ writer = csv.writer(response, **CSV_OPTIONS)
+ if col_names:
+ col_names = [name.encode(ENCODING, errors='replace')
+ for name in col_names]
+ else:
+ col_names = []
+ for field_name in table_cols:
+ if type(field_name) in (list, tuple):
+ field_name = u" & ".join(field_name)
+ if hasattr(model, 'COL_LABELS') and \
+ field_name in model.COL_LABELS:
+ field = model.COL_LABELS[field_name]
+ col_names.append(unicode(field).encode(ENCODING))
+ continue
+ else:
+ try:
+ field = model._meta.get_field(field_name)
+                    except Exception:
+ col_names.append(u"".encode(ENCODING))
+ logger.warning(
+ "**WARN get_item - csv export**: no col name "
+ "for {}\nadd explicit label to "
+ "COL_LABELS attribute of "
+ "{}".format(field_name, model))
+ continue
+ col_names.append(
+ unicode(field.verbose_name).encode(ENCODING))
+ writer.writerow(col_names)
+ for data in datas:
+ row, delta = [], 0
+ # regroup cols with join "|"
+ for idx, col_name in enumerate(table_cols):
+ if len(data[1:]) <= idx + delta:
+ break
+ val = data[1:][idx + delta].encode(
+ ENCODING, errors='replace')
+ if col_name and "|" in col_name[0]:
+ for delta_idx in range(
+ len(col_name[0].split('|')) - 1):
+ delta += 1
+ val += data[1:][idx + delta].encode(
+ ENCODING, errors='replace')
+ row.append(val)
+ writer.writerow(row)
+ return response
+ return HttpResponse('{}', content_type='text/plain')
+
+ return func