path: root/ishtar_common/views_item.py
blob: b286ab3bfa085c459a053f7e638282dd457317c5 (plain)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import csv
import datetime
import json
import logging
import optparse
import re
from copy import copy, deepcopy
from tempfile import NamedTemporaryFile

from django.conf import settings
from django.contrib.gis.geos import GEOSException
from django.contrib.postgres.search import SearchQuery
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import Q, ImageField
from django.db.models.fields import FieldDoesNotExist
from django.http import HttpResponse
from django.shortcuts import render
from django.template import loader
from django.utils.translation import ugettext, ugettext_lazy as _
from tidylib import tidy_document as tidy
from unidecode import unidecode
from weasyprint import HTML, CSS
from weasyprint.fonts import FontConfiguration
from xhtml2odt import xhtml2odt

from ishtar_common.utils import check_model_access_control, CSV_OPTIONS, \
    get_all_field_names
from ishtar_common.models import HistoryError, get_current_profile, \
    PRIVATE_FIELDS
from menus import Menu

import models
from archaeological_files.models import File
from archaeological_operations.models import Operation
from archaeological_context_records.models import ContextRecord
from archaeological_finds.models import Find, FindBasket, Treatment, \
    TreatmentFile

logger = logging.getLogger(__name__)

ENCODING = settings.ENCODING or 'utf-8'

CURRENT_ITEM_KEYS = (('file', File),
                     ('operation', Operation),
                     ('contextrecord', ContextRecord),
                     ('find', Find),
                     ('treatmentfile', TreatmentFile),
                     ('treatment', Treatment))
CURRENT_ITEM_KEYS_DICT = dict(CURRENT_ITEM_KEYS)


def get_autocomplete_item(model, extra=None):
    if not extra:
        extra = {}

    def func(request, current_right=None):
        term = request.GET.get('term') or ""
        query = Q(**extra)
        for part in term.split(' '):
            if not part:
                continue
            query = query & Q(cached_label__icontains=part)
        limit = 20
        objects = model.objects.filter(query)[:limit]
        data = json.dumps([{'id': obj.pk, 'value': obj.cached_label}
                           for obj in objects])
        return HttpResponse(data, content_type='text/plain')
    return func
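
# Usage sketch (illustrative only, these names are not defined here): the
# factory returns a ready-to-use view, typically wired in urls.py, e.g.:
#     autocomplete_town = get_autocomplete_item(models.Town)
#     url(r'^autocomplete-town/$', autocomplete_town,
#         name='autocomplete-town')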


def check_permission(request, action_slug, obj_id=None):
    MAIN_MENU = Menu(request.user)
    MAIN_MENU.init()
    if action_slug not in MAIN_MENU.items:
        # TODO: actions missing from the menu are currently always allowed
        return True
    if obj_id:
        return MAIN_MENU.items[action_slug].is_available(
            request.user, obj_id, session=request.session)
    return MAIN_MENU.items[action_slug].can_be_available(
        request.user, session=request.session)


def new_item(model, frm, many=False):
    def func(request, parent_name, limits=''):
        model_name = model._meta.object_name
        if not check_permission(request, 'add_' + model_name.lower()):
            not_permitted_msg = ugettext(u"Operation not permitted.")
            return HttpResponse(not_permitted_msg)
        dct = {'title': unicode(_(u'New %s') % model_name.lower()),
               'many': many}
        if request.method == 'POST':
            dct['form'] = frm(request.POST, limits=limits)
            if dct['form'].is_valid():
                new_item = dct['form'].save(request.user)
                dct['new_item_label'] = unicode(new_item)
                dct['new_item_pk'] = new_item.pk
                dct['parent_name'] = parent_name
                dct['parent_pk'] = parent_name
                if dct['parent_pk'] and '_select_' in dct['parent_pk']:
                    parents = dct['parent_pk'].split('_')
                    dct['parent_pk'] = "_".join([parents[0]] + parents[2:])
                return render(request, 'window.html', dct)
        else:
            dct['form'] = frm(limits=limits)
        return render(request, 'window.html', dct)
    return func


def display_item(model, extra_dct=None, show_url=None):
    def func(request, pk, **dct):
        if show_url:
            dct['show_url'] = "/{}{}/".format(show_url, pk)
        else:
            dct['show_url'] = "/show-{}/{}/".format(model.SLUG, pk)
        return render(request, 'ishtar/display_item.html', dct)
    return func


def show_item(model, name, extra_dct=None):
    def func(request, pk, **dct):
        allowed, own = check_model_access_control(request, model)
        if not allowed:
            return HttpResponse('', content_type="application/xhtml")
        q = model.objects
        if own:
            query_own = model.get_query_owns(request.user)
            if query_own:
                q = q.filter(query_own)
        try:
            item = q.get(pk=pk)
        except ObjectDoesNotExist:
            return HttpResponse('NOK')
        doc_type = 'type' in dct and dct.pop('type')
        url_name = u"/".join(reverse('show-' + name, args=['0', '']
                                     ).split('/')[:-2]) + u"/"
        dct['CURRENCY'] = get_current_profile().currency
        dct['ENCODING'] = settings.ENCODING
        dct['DOT_GENERATION'] = bool(settings.DOT_BINARY)
        dct['current_window_url'] = url_name
        date = None
        if 'date' in dct:
            date = dct.pop('date')
        dct['sheet_id'] = "%s-%d" % (name, item.pk)
        dct['window_id'] = "%s-%d-%s" % (
            name, item.pk, datetime.datetime.now().strftime('%M%s'))
        if hasattr(item, 'history'):
            if date:
                try:
                    date = datetime.datetime.strptime(date,
                                                      '%Y-%m-%dT%H:%M:%S.%f')
                    item = item.get_previous(date=date)
                    assert item is not None
                except (ValueError, AssertionError):
                    return HttpResponse(None, content_type='text/plain')
                dct['previous'] = item._previous
                dct['next'] = item._next
            else:
                historized = item.history.all()
                if historized:
                    item.history_date = historized[0].history_date
                if len(historized) > 1:
                    dct['previous'] = historized[1].history_date
        dct['item'], dct['item_name'] = item, name
        # add context
        if extra_dct:
            dct.update(extra_dct(request, item))
        context_instance = deepcopy(dct)
        context_instance['output'] = 'html'
        if hasattr(item, 'history_object'):
            filename = item.history_object.associated_filename
        else:
            filename = item.associated_filename
        if doc_type == "odt" and settings.ODT_TEMPLATE:
            tpl = loader.get_template('ishtar/sheet_%s.html' % name)
            context_instance['output'] = 'ODT'
            content = tpl.render(context_instance, request)
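            # convert the rendered HTML sheet to ODT: tidy it into valid
            # XHTML first, then feed it to xhtml2odt through an
            # optparse.Values object mirroring the xhtml2odt command line
            # options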
            try:
                tidy_options = {'output-xhtml': 1, 'indent': 1,
                                'tidy-mark': 0, 'doctype': 'auto',
                                'add-xml-decl': 1, 'wrap': 1}
                html, errors = tidy(content, options=tidy_options)
                # replace non-breaking spaces (U+00A0, written here as its
                # UTF-8 byte sequence to keep it visible) by plain spaces
                html = html.encode('utf-8').replace('\xc2\xa0', ' ')
                html = re.sub('<pre([^>]*)>\n', '<pre\\1>', html)

                odt = NamedTemporaryFile()
                options = optparse.Values()
                options.with_network = True
                for k, v in (('input', ''),
                             ('output', odt.name),
                             ('template', settings.ODT_TEMPLATE),
                             ('with_network', True),
                             ('top_header_level', 1),
                             ('img_width', '8cm'),
                             ('img_height', '6cm'),
                             ('verbose', False),
                             ('replace_keyword', 'ODT-INSERT'),
                             ('cut_start', 'ODT-CUT-START'),
                             ('htmlid', None),
                             ('url', "#")):
                    setattr(options, k, v)
                odtfile = xhtml2odt.ODTFile(options)
                odtfile.open()
                odtfile.import_xhtml(html)
                odtfile = odtfile.save()
            except xhtml2odt.ODTExportError:
                return HttpResponse(content, content_type="application/xhtml")
            response = HttpResponse(
                content_type='application/vnd.oasis.opendocument.text')
            response['Content-Disposition'] = 'attachment; filename=%s.odt' % \
                                              filename
            response.write(odtfile)
            return response
        elif doc_type == 'pdf':
            tpl = loader.get_template('ishtar/sheet_%s_pdf.html' % name)
            context_instance['output'] = 'PDF'
            html = tpl.render(context_instance, request)
            font_config = FontConfiguration()
            css = CSS(string='''
            @font-face {
                font-family: Gentium;
                src: url(%s);
            }
            body{
                font-family: Gentium
            }
            ''' % (static("gentium/GentiumPlus-R.ttf")))
            css2 = CSS(filename=settings.STATIC_ROOT + '/media/style_basic.css')
            pdf = HTML(string=html, base_url=request.build_absolute_uri()
                       ).write_pdf(stylesheets=[css, css2],
                                   font_config=font_config)
            response = HttpResponse(pdf, content_type='application/pdf')
            response['Content-Disposition'] = 'attachment; filename=%s.pdf' % \
                                              filename
            return response
        else:
            tpl = loader.get_template('ishtar/sheet_%s_window.html' % name)
            content = tpl.render(context_instance, request)
            return HttpResponse(content, content_type="application/xhtml")
    return func


def revert_item(model):
    def func(request, pk, date, **dct):
        try:
            item = model.objects.get(pk=pk)
            date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f')
            item.rollback(date)
        except (ObjectDoesNotExist, ValueError, HistoryError):
            return HttpResponse(None, content_type='text/plain')
        return HttpResponse("True", content_type='text/plain')
    return func


HIERARCHIC_LEVELS = 5
HIERARCHIC_FIELDS = ['periods', 'period', 'unit', 'material_types',
                     'material_type', 'conservatory_state', 'object_types']


def _get_values(request, val):
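    # resolve a field value into a list of displayable values: related
    # managers are expanded, callables are called and values with an "url"
    # attribute (files, images) are turned into absolute URLs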
    if hasattr(val, 'all'):  # manage related objects
        vals = list(val.all())
    else:
        vals = [val]
    new_vals = []
    for v in vals:
        if callable(v):
            v = v()
        if hasattr(v, 'url'):
            # build an absolute URL for file/image fields
            scheme = 'https' if request.is_secure() else 'http'
            v = scheme + '://' + request.get_host() + v.url
        new_vals.append(v)
    return new_vals


def _search_manage_search_vector(dct):
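    # turn the raw "search_vector" criterion into a PostgreSQL full text
    # SearchQuery, with accents stripped and the configured search language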
    if 'search_vector' in dct:
        dct['search_vector'] = SearchQuery(
            unidecode(dct['search_vector']),
            config=settings.ISHTAR_SEARCH_LANGUAGE
        )
    return dct


def _format_val(val):
    if val is None:
        return u""
    if type(val) == bool:
        if val:
            return unicode(_(u"True"))
        else:
            return unicode(_(u"False"))
    if type(val) == str:
        val = val.decode('utf-8')
    return unicode(val)


DEFAULT_ROW_NUMBER = 10
# length is used by ajax DataTables requests
EXCLUDED_FIELDS = ['length']


def get_item(model, func_name, default_name, extra_request_keys=[],
             base_request=None, bool_fields=[], reversed_bool_fields=[],
             dated_fields=[], associated_models=[], relative_session_names=[],
             specific_perms=[], own_table_cols=None, relation_types_prefix={},
             do_not_deduplicate=False):
    """
    Generic treatment of tables

    :param model: model used for query
    :param func_name: name of the function (used for session storage)
    :param default_name: key used for default search in session
    :param extra_request_keys: default query limitation
    :param base_request:
    :param bool_fields:
    :param reversed_bool_fields:
    :param dated_fields:
    :param associated_models:
    :param relative_session_names:
    :param specific_perms:
    :param own_table_cols:
    :param relation_types_prefix:
    :param do_not_deduplicate: duplication of id can occurs on large queryset a
    mecanism of deduplication is used. But duplicate ids can be normal (for
    instance for record_relations view).
    :return:
    """
    def func(request, data_type='json', full=False, force_own=False,
             col_names=None, **dct):
        available_perms = []
        if specific_perms:
            available_perms = specific_perms[:]
        EMPTY = ''
        if 'type' in dct:
            data_type = dct.pop('type')
        if not data_type:
            EMPTY = '[]'
            data_type = 'json'

        allowed, own = check_model_access_control(request, model,
                                                  available_perms)
        if not allowed:
            return HttpResponse(EMPTY, content_type='text/plain')

        if force_own:
            own = True
        if full == 'shortcut' and 'SHORTCUT_SEARCH' in request.session and \
                request.session['SHORTCUT_SEARCH'] == 'own':
            own = True

        # get defaults from model
        if not extra_request_keys and hasattr(model, 'EXTRA_REQUEST_KEYS'):
            my_extra_request_keys = copy(model.EXTRA_REQUEST_KEYS)
        else:
            my_extra_request_keys = copy(extra_request_keys)
        if base_request is None and hasattr(model, 'BASE_REQUEST'):
            my_base_request = copy(model.BASE_REQUEST)
        elif base_request is not None:
            my_base_request = copy(base_request)
        else:
            my_base_request = {}
        if not bool_fields and hasattr(model, 'BOOL_FIELDS'):
            my_bool_fields = model.BOOL_FIELDS[:]
        else:
            my_bool_fields = bool_fields[:]
        if not reversed_bool_fields and hasattr(model, 'REVERSED_BOOL_FIELDS'):
            my_reversed_bool_fields = model.REVERSED_BOOL_FIELDS[:]
        else:
            my_reversed_bool_fields = reversed_bool_fields[:]
        if not dated_fields and hasattr(model, 'DATED_FIELDS'):
            my_dated_fields = model.DATED_FIELDS[:]
        else:
            my_dated_fields = dated_fields[:]
        if not associated_models and hasattr(model, 'ASSOCIATED_MODELS'):
            my_associated_models = model.ASSOCIATED_MODELS[:]
        else:
            my_associated_models = associated_models[:]
        if not relative_session_names and hasattr(model,
                                                  'RELATIVE_SESSION_NAMES'):
            my_relative_session_names = model.RELATIVE_SESSION_NAMES[:]
        else:
            my_relative_session_names = relative_session_names[:]
        if not relation_types_prefix and hasattr(model,
                                                 'RELATION_TYPES_PREFIX'):
            my_relation_types_prefix = copy(model.RELATION_TYPES_PREFIX)
        else:
            my_relation_types_prefix = copy(relation_types_prefix)

        fields = [model._meta.get_field(k)
                  for k in get_all_field_names(model)]

        request_keys = dict([
            (field.name,
             field.name + (hasattr(field, 'rel') and field.rel and '__pk'
                           or ''))
            for field in fields])
        for associated_model, key in my_associated_models:
            if type(associated_model) in (str, unicode):
                if associated_model not in globals():
                    continue
                associated_model = globals()[associated_model]
            associated_fields = [
                associated_model._meta.get_field(k)
                for k in get_all_field_names(associated_model)]
            request_keys.update(
                dict([(key + "__" + field.name,
                       key + "__" + field.name +
                       (hasattr(field, 'rel') and field.rel and '__pk' or ''))
                      for field in associated_fields]))
        request_keys.update(my_extra_request_keys)
        request_items = request.method == 'POST' and request.POST \
                        or request.GET

        # pager
        try:
            row_nb = int(request_items.get('length'))
        except (ValueError, TypeError):
            row_nb = DEFAULT_ROW_NUMBER
        dct_request_items = {}

        # filter requested fields
        for k in request_items:
            if k in EXCLUDED_FIELDS:
                continue
            key = k[:]
            if key.startswith('searchprefix_'):
                key = key[len('searchprefix_'):]
            dct_request_items[key] = request_items[k]
        request_items = dct_request_items

        dct = my_base_request
        if full == 'shortcut':
            dct['cached_label__icontains'] = request.GET.get('term', None)
        and_reqs, or_reqs = [], []
        try:
            old = 'old' in request_items and int(request_items['old'])
        except ValueError:
            return HttpResponse('[]', content_type='text/plain')

        # manage relations types
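        # for each configured prefix, collect the relation type pks requested
        # by the user; they are turned further below into OR conditions on
        # right_relations__relation_type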
        if 'relation_types' not in my_relation_types_prefix:
            my_relation_types_prefix['relation_types'] = ''
        relation_types = {}
        for rtype_key in my_relation_types_prefix:
            relation_types[my_relation_types_prefix[rtype_key]] = set()
            for k in request_items:
                if k.startswith(rtype_key):
                    relation_types[my_relation_types_prefix[rtype_key]].add(
                        request_items[k])
                    continue

        for k in request_keys:
            val = request_items.get(k)
            if not val:
                continue
            req_keys = request_keys[k]
            if type(req_keys) not in (list, tuple):
                dct[req_keys] = val
                continue
            # multiple choice target
            reqs = Q(**{req_keys[0]: val})
            for req_key in req_keys[1:]:
                q = Q(**{req_key: val})
                reqs |= q
            and_reqs.append(reqs)

        pinned_search = ""
        if 'submited' not in request_items and full != 'shortcut':
            # default search
            # an item is selected in the default menu
            if default_name in request.session and \
                    request.session[default_name]:
                value = request.session[default_name]
                if 'basket-' in value:
                    try:
                        dct = {"basket__pk":
                                   request.session[default_name].split('-')[-1]}
                        pinned_search = unicode(FindBasket.objects.get(
                            pk=dct["basket__pk"]))
                    except FindBasket.DoesNotExist:
                        pass
                else:
                    try:
                        dct = {"pk": request.session[default_name]}
                        pinned_search = unicode(model._meta.verbose_name) \
                                        + u" - " + unicode(
                            model.objects.get(pk=dct["pk"]))
                    except model.DoesNotExist:
                        pass
            elif dct == (my_base_request or {}):
                # a parent item may be selected in the default menu
                for name, key in my_relative_session_names:
                    if name in request.session and request.session[name] \
                            and 'basket-' not in request.session[name] \
                            and name in CURRENT_ITEM_KEYS_DICT:
                        up_model = CURRENT_ITEM_KEYS_DICT[name]
                        try:
                            dct.update({key: request.session[name]})
                            pinned_search = unicode(up_model._meta.verbose_name) \
                                            + u" - " + unicode(
                                up_model.objects.get(pk=dct[key]))
                            break
                        except up_model.DoesNotExist:
                            pass
            if (not dct or data_type == 'csv') \
                    and func_name in request.session:
                dct = request.session[func_name]
        else:
            request.session[func_name] = dct
        for k in (list(my_bool_fields) + list(my_reversed_bool_fields)):
            if k in dct:
                if dct[k] == u"1":
                    dct.pop(k)
                else:
                    dct[k] = dct[k] == u"2" and True or False
                    if k in my_reversed_bool_fields:
                        dct[k] = not dct[k]
                    # check also for empty value with image field
                    field_name = k.split('__')[0]
                    # TODO: can be improved in later version of Django
                    try:
                        c_field = model._meta.get_field(field_name)
                        if k.endswith('__isnull') and \
                                isinstance(c_field, ImageField):
                            if dct[k]:
                                or_reqs.append(
                                    (k, {k.split('__')[0] + '__exact': ''}))
                            else:
                                dct[k.split('__')[0] + '__regex'] = '.{1}.*'
                    except FieldDoesNotExist:
                        pass
        for k in my_dated_fields:
            if k in dct:
                if not dct[k]:
                    dct.pop(k)
                    continue
                try:
                    parts = dct[k].split('/')
                    assert len(parts) == 3
                    dct[k] = datetime.date(*map(lambda x: int(x),
                                                reversed(parts))) \
                        .strftime('%Y-%m-%d')
                except (ValueError, AssertionError):
                    dct.pop(k)
        # manage hierarchic conditions
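        # for hierarchic fields (areas, towns, periods, etc.) an exact pk
        # filter is widened to also match parent or children items up to
        # HIERARCHIC_LEVELS levels, the lookups being OR-ed together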
        for req in dct.copy():
            if req.endswith('areas__pk'):
                val = dct.pop(req)
                reqs = Q(**{req: val})
                base_req = req[:-2] + '__'
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-2] + 'parent__pk'
                    q = Q(**{req: val})
                    reqs |= q
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-2] + 'children__pk'
                    q = Q(**{req: val})
                    reqs |= q
                and_reqs.append(reqs)
                continue

            if req.endswith('town__pk') or req.endswith('towns__pk'):
                val = dct.pop(req)
                reqs = Q(**{req: val})
                base_req = req[:-2] + '__'
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-2] + 'parents__pk'
                    q = Q(**{req: val})
                    reqs |= q
                req = base_req[:]
                for idx in range(HIERARCHIC_LEVELS):
                    req = req[:-2] + 'children__pk'
                    q = Q(**{req: val})
                    reqs |= q
                and_reqs.append(reqs)
                continue

            for k_hr in HIERARCHIC_FIELDS:
                if type(req) in (list, tuple):
                    val = dct.pop(req)
                    q = None
                    for idx, r in enumerate(req):
                        if not idx:
                            q = Q(**{r: val})
                        else:
                            q |= Q(**{r: val})
                    and_reqs.append(q)
                    break
                elif req.endswith(k_hr + '__pk'):
                    val = dct.pop(req)
                    reqs = Q(**{req: val})
                    req = req[:-2] + '__'
                    for idx in range(HIERARCHIC_LEVELS):
                        req = req[:-2] + 'parent__pk'
                        q = Q(**{req: val})
                        reqs |= q
                    and_reqs.append(reqs)
                    break
        dct = _search_manage_search_vector(dct)
        query = Q(**dct)
        for k, or_req in or_reqs:
            alt_dct = dct.copy()
            alt_dct.pop(k)
            alt_dct.update(or_req)
            query |= Q(**alt_dct)

        for rtype_prefix in relation_types:
            vals = list(relation_types[rtype_prefix])
            if not vals:
                continue
            alt_dct = {
                rtype_prefix + 'right_relations__relation_type__pk__in': vals}
            for k in dct:
                val = dct[k]
                if rtype_prefix:
                    # only get conditions related to the object
                    if rtype_prefix not in k:
                        continue
                    # tricky: reconstruct the key to make sense - remove the
                    # prefix from the key
                    prefix_idx = k.index(rtype_prefix)
                    k = k[:prefix_idx] + k[prefix_idx + len(rtype_prefix):]
                if k.endswith('year'):
                    k += '__exact'
                alt_dct[rtype_prefix + 'right_relations__right_record__' + k] = \
                    val
            if not dct:
                # fake condition to trick Django (1.4): without it only the
                # alt_dct is managed
                query &= Q(pk__isnull=False)
            query |= Q(**alt_dct)
            for k, or_req in or_reqs:
                altor_dct = alt_dct.copy()
                altor_dct.pop(k)
                for j in or_req:
                    val = or_req[j]
                    if j == 'year':
                        j = 'year__exact'
                    altor_dct[
                        rtype_prefix + 'right_relations__right_record__' + j] = \
                        val
                query |= Q(**altor_dct)

        if own:
            q = models.IshtarUser.objects.filter(user_ptr=request.user)
            if q.count():
                query = query & model.get_query_owns(q.all()[0])
            else:
                return HttpResponse(EMPTY, content_type='text/plain')

        for and_req in and_reqs:
            query = query & and_req

        # manage hierarchic in shortcut menu
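        # in shortcut mode the list is restricted to children of the item
        # currently selected in the session (file for operations, operation
        # for context records, context record for finds)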
        if full == 'shortcut':
            ASSOCIATED_ITEMS = {
                Operation: (File, 'associated_file__pk'),
                ContextRecord: (Operation, 'operation__pk'),
                Find: (ContextRecord, 'base_finds__context_record__pk'),
            }
            if model in ASSOCIATED_ITEMS:
                upper_model, upper_key = ASSOCIATED_ITEMS[model]
                model_name = upper_model.SLUG
                current = model_name in request.session \
                          and request.session[model_name]
                if current:
                    dct = {upper_key: current}
                    query &= Q(**dct)

        items = model.objects.filter(query).distinct()
        # print(items.query)

        if 'search_vector' in dct:  # for serialization
            dct['search_vector'] = dct['search_vector'].value

        # table cols
        if own_table_cols:
            table_cols = own_table_cols
        else:
            if full:
                table_cols = [field.name for field in model._meta.fields
                              if field.name not in PRIVATE_FIELDS]
                table_cols += [field.name for field in model._meta.many_to_many
                               if field.name not in PRIVATE_FIELDS]
                if hasattr(model, 'EXTRA_FULL_FIELDS'):
                    table_cols += model.EXTRA_FULL_FIELDS
            else:
                table_cols = model.TABLE_COLS
        query_table_cols = []
        for cols in table_cols:
            if type(cols) not in (list, tuple):
                cols = [cols]
            for col in cols:
                query_table_cols += col.split('|')

        # contextual (full, simple, etc.) col
        contxt = full and 'full' or 'simple'
        if hasattr(model, 'CONTEXTUAL_TABLE_COLS') and \
                contxt in model.CONTEXTUAL_TABLE_COLS:
            for idx, col in enumerate(table_cols):
                if col in model.CONTEXTUAL_TABLE_COLS[contxt]:
                    query_table_cols[idx] = \
                        model.CONTEXTUAL_TABLE_COLS[contxt][col]
        if full == 'shortcut':
            query_table_cols = ['cached_label']
            table_cols = ['cached_label']

        # manage sort tables
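        # DataTables sends sort criteria as order[<n>][column] (column index)
        # and order[<n>][dir] ('asc'/'desc') parameters; rebuild them here as
        # order_by() arguments, falling back to the model default ordering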
        manual_sort_key = None

        sorts = {}
        for k in request_items:
            if not k.startswith('order['):
                continue
            num = int(k.split(']')[0][len("order["):])
            if num not in sorts:
                sorts[num] = ['', '']  # sign, col_num
            if k.endswith('[dir]'):
                order = request_items[k]
                sign = order and order == u'desc' and "-" or ''
                sorts[num][0] = sign
            if k.endswith('[column]'):
                sorts[num][1] = request_items[k]
        sign = ""
        if not sorts and model._meta.ordering:
            orders = [k for k in model._meta.ordering]
            items = items.order_by(*orders)
        else:
            orders = []
            for idx in sorted(sorts.keys()):
                signe, col_num = sorts[idx]
                k = query_table_cols[int(col_num) - 2]  # remove id and link col
                if k in request_keys:
                    ks = request_keys[k]
                    if type(ks) not in (tuple, list):
                        ks = [ks]
                    for k in ks:
                        if k.endswith("__pk"):
                            k = k[:-len("__pk")] + "__label"
                        if '__' in k:
                            k = k.split('__')[0]
                        orders.append(signe + k)
                else:
                    # not a standard request key
                    if idx:  # not the first - we ignore this sort
                        continue
                    sign = signe
                    manual_sort_key = k
                    logger.warning(
                        "**WARN get_item - {}**: manual sort key '{}'".format(
                            func_name, k))
                    break
            if not manual_sort_key:
                items = items.order_by(*orders)

        # pager management
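        # "start" is the 0-based index of the first requested row; derive the
        # page number and the slice bounds applied to the queryset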
        start, end = 0, None
        page_nb = 1
        if row_nb and data_type == "json":
            try:
                start = int(request_items.get('start'))
                page_nb = start / row_nb + 1
                assert page_nb >= 1
            except (TypeError, ValueError, AssertionError):
                start = 0
                page_nb = 1
            end = page_nb * row_nb
        if full == 'shortcut':
            start = 0
            end = 20

        items_nb = items.count()
        if manual_sort_key:
            items = items.all()
        else:
            items = items[start:end]

        datas = []
        if old:
            items = [item.get_previous(old) for item in items]
        c_ids = []
        for item in items:
            # manual deduplicate when distinct is not enough
            if not do_not_deduplicate and item.pk in c_ids:
                continue
            c_ids.append(item.pk)
            data = [item.pk]
            for keys in query_table_cols:
                if type(keys) not in (list, tuple):
                    keys = [keys]
                my_vals = []
                for k in keys:
                    if hasattr(model, 'EXTRA_REQUEST_KEYS') \
                            and k in model.EXTRA_REQUEST_KEYS:
                        k = model.EXTRA_REQUEST_KEYS[k]
                        if type(k) in (list, tuple):
                            k = k[0]
                    for filtr in ('__icontains', '__contains'):
                        if k.endswith(filtr):
                            k = k[:len(k) - len(filtr)]
                    vals = [item]
                    # foreign key may be divided by "." or "__"
                    splitted_k = []
                    for ky in k.split('.'):
                        if '__' in ky:
                            splitted_k += ky.split('__')
                        else:
                            splitted_k.append(ky)
                    for ky in splitted_k:
                        new_vals = []
                        for val in vals:
                            if hasattr(val, 'all'):  # manage related objects
                                val = list(val.all())
                                for v in val:
                                    v = getattr(v, ky)
                                    new_vals += _get_values(request, v)
                            elif val:
                                try:
                                    val = getattr(val, ky)
                                    new_vals += _get_values(request, val)
                                except (AttributeError, GEOSException):
                                    # must be a query key such as "contains"
                                    pass
                        vals = new_vals
                    # manage last related objects
                    if vals and hasattr(vals[0], 'all'):
                        new_vals = []
                        for val in vals:
                            new_vals += list(val.all())
                        vals = new_vals
                    if not my_vals:
                        my_vals = [_format_val(va) for va in vals]
                    else:
                        new_vals = []
                        if not vals:
                            for idx, my_v in enumerate(my_vals):
                                new_vals.append(u"{}{}{}".format(
                                    my_v, u' - ', ''))
                        else:
                            for idx, v in enumerate(vals):
                                prev = my_vals[idx] \
                                    if idx < len(my_vals) else u""
                                new_vals.append(u"{}{}{}".format(
                                    prev, u' - ', _format_val(v)))
                        my_vals = new_vals[:]
                data.append(u" & ".join(my_vals) or u"")
            datas.append(data)
        if manual_sort_key:
            # +1 because the id is added as a first col
            idx_col = None
            if manual_sort_key in query_table_cols:
                idx_col = query_table_cols.index(manual_sort_key) + 1
            else:
                for idx, col in enumerate(query_table_cols):
                    if type(col) in (list, tuple) and \
                            manual_sort_key in col:
                        idx_col = idx + 1
            if idx_col is not None:
                datas = sorted(datas, key=lambda x: x[idx_col])
                if sign == '-':
                    datas = reversed(datas)
                datas = list(datas)[start:end]
        link_template = "<a class='display_details' href='#' " \
                        "onclick='load_window(\"%s\")'>" \
                        "<i class=\"fa fa-info-circle\" aria-hidden=\"true\"></i></a>"
        link_ext_template = '<a href="{}" target="_blank">{}</a>'
        if data_type == "json":
            rows = []
            for data in datas:
                try:
                    lnk = link_template % reverse('show-' + default_name,
                                                  args=[data[0], ''])
                except NoReverseMatch:
                    logger.warning(
                        '**WARN "show-' + default_name + '" args ('
                        + unicode(data[0]) + ") url not available")
                    lnk = ''
                res = {'id': data[0], 'link': lnk}
                for idx, value in enumerate(data[1:]):
                    if value:
                        table_col = table_cols[idx]
                        if type(table_col) not in (list, tuple):
                            table_col = [table_col]
                        tab_cols = []
                        # foreign key may be divided by "." or "__"
                        for tc in table_col:
                            if '.' in tc:
                                tab_cols += tc.split('.')
                            elif '__' in tc:
                                tab_cols += tc.split('__')
                            else:
                                tab_cols.append(tc)
                        k = "__".join(tab_cols)
                        if hasattr(model, 'COL_LINK') and k in model.COL_LINK:
                            value = link_ext_template.format(value, value)
                        res[k] = value
                if full == 'shortcut' and 'cached_label' in res:
                    res['value'] = res.pop('cached_label')
                rows.append(res)
            if full == 'shortcut':
                data = json.dumps(rows)
            else:
                data = json.dumps({
                    "recordsTotal": items_nb,
                    "recordsFiltered": items_nb,
                    "rows": rows,
                    "pinned-search": pinned_search,
                    "page": page_nb,
                    "total": (items_nb / row_nb + 1) if row_nb else items_nb,
                })
            return HttpResponse(data, content_type='text/plain')
        elif data_type == "csv":
            response = HttpResponse(content_type='text/csv')
            n = datetime.datetime.now()
            filename = u'%s_%s.csv' % (default_name,
                                       n.strftime('%Y%m%d-%H%M%S'))
            response['Content-Disposition'] = 'attachment; filename=%s' \
                                              % filename
            writer = csv.writer(response, **CSV_OPTIONS)
            if col_names:
                col_names = [name.encode(ENCODING, errors='replace')
                             for name in col_names]
            else:
                col_names = []
                for field_name in table_cols:
                    if type(field_name) in (list, tuple):
                        field_name = u" & ".join(field_name)
                    if hasattr(model, 'COL_LABELS') and \
                            field_name in model.COL_LABELS:
                        field = model.COL_LABELS[field_name]
                        col_names.append(unicode(field).encode(ENCODING))
                        continue
                    else:
                        try:
                            field = model._meta.get_field(field_name)
                        except Exception:
                            col_names.append(u"".encode(ENCODING))
                            logger.warning(
                                "**WARN get_item - csv export**: no col name "
                                "for {}\nadd explicit label to "
                                "COL_LABELS attribute of "
                                "{}".format(field_name, model))
                            continue
                        col_names.append(
                            unicode(field.verbose_name).encode(ENCODING))
            writer.writerow(col_names)
            for data in datas:
                row, delta = [], 0
                # regroup cols with join "|"
                for idx, col_name in enumerate(table_cols):
                    if len(data[1:]) <= idx + delta:
                        break
                    val = data[1:][idx + delta].encode(
                        ENCODING, errors='replace')
                    if col_name and "|" in col_name[0]:
                        for delta_idx in range(
                                len(col_name[0].split('|')) - 1):
                            delta += 1
                            val += data[1:][idx + delta].encode(
                                ENCODING, errors='replace')
                    row.append(val)
                writer.writerow(row)
            return response
        return HttpResponse('{}', content_type='text/plain')

    return func