-rw-r--r--  archaeological_operations/tests.py     9
-rw-r--r--  example_project/settings.py             2
-rw-r--r--  ishtar_common/admin.py                 25
-rw-r--r--  ishtar_common/libreoffice.py           16
-rw-r--r--  ishtar_common/models_common.py          4
-rw-r--r--  ishtar_common/views_item.py           119
6 files changed, 87 insertions(+), 88 deletions(-)
diff --git a/archaeological_operations/tests.py b/archaeological_operations/tests.py
index 136f710f1..953229dbc 100644
--- a/archaeological_operations/tests.py
+++ b/archaeological_operations/tests.py
@@ -3245,8 +3245,10 @@ class OperationSearchTest(TestCase, OperationInitTest, SearchText, StatisticsTes
ope1.year = 2042
ope1.end_date = "2010-01-01"
+ ope1.common_name = "Opération : Château de Fougères"
ope1.save()
ope2.year = 2020
+ ope2.common_name = "Opération : Château de Josselin"
ope2.save()
# ope3.year: 2018
@@ -3340,6 +3342,13 @@ class OperationSearchTest(TestCase, OperationInitTest, SearchText, StatisticsTes
'{}*'.format(neo.label[:3]), 2, "Open search")
"""
+        # multiple open searches on the same key
+ search_name_q = str(pgettext("key for text search", "name"))
+ result = [
+ (f'{search_name_q}="Foug*" {search_name_q}="Jossel*"', 2),
+ ]
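+        # the two wildcard values are expected to match one operation each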
+ self._test_search(c, result, context="Many name open search")
+
# non hierarchic search
search_remain_q = str(pgettext("key for text search", "remain"))
result = [
diff --git a/example_project/settings.py b/example_project/settings.py
index ba228d2f0..6fbff266e 100644
--- a/example_project/settings.py
+++ b/example_project/settings.py
@@ -273,7 +273,7 @@ LOGGING = {
USE_BACKGROUND_TASK = False
USE_LIBREOFFICE = False
LIBREOFFICE_PORT = 8101
-LIBREOFFICE_HOST = "localhost"
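+# an explicit IPv4 loopback avoids depending on how "localhost" resolves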
+LIBREOFFICE_HOST = "127.0.0.1"
# Ishtar custom
diff --git a/ishtar_common/admin.py b/ishtar_common/admin.py
index f3bdac97f..0b6765399 100644
--- a/ishtar_common/admin.py
+++ b/ishtar_common/admin.py
@@ -2124,22 +2124,31 @@ duplicate_importertype.short_description = _("Duplicate")
def generate_libreoffice_template(modeladmin, request, queryset):
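+    # changelist URL carrying the current filters; used as the redirect target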
+ c_url = (
+ reverse(
+ "admin:%s_%s_changelist"
+            % (modeladmin.model._meta.app_label, modeladmin.model._meta.model_name)
+ )
+ + "?"
+ + urllib.parse.urlencode(request.GET)
+ )
if queryset.count() != 1:
messages.add_message(
request, messages.ERROR, str(_("Select only one importer."))
)
- c_url = (
- reverse(
- "admin:%s_%s_changelist"
-            % (modeladmin.model._meta.app_label, modeladmin.model._meta.model_name)
- )
- + "?"
- + urllib.parse.urlencode(request.GET)
- )
return HttpResponseRedirect(c_url)
importer_type = queryset.all()[0]
-    dest_filename = importer_type.get_libreoffice_template()
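+    # get_libreoffice_template() may raise AttributeError when the UNO client
+    # failed to connect (service_manager is then None, see libreoffice.py)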
+ try:
+ dest_filename = importer_type.get_libreoffice_template()
+ except AttributeError:
+ messages.add_message(
+ request, messages.ERROR,
+            str(_("Error with the LibreOffice daemon. Contact your server administrator."))
+ )
+ return HttpResponseRedirect(c_url)
+
in_memory = BytesIO()
with open(dest_filename, "rb") as fle:
in_memory.write(fle.read())
diff --git a/ishtar_common/libreoffice.py b/ishtar_common/libreoffice.py
index 97437bd9a..2007bf895 100644
--- a/ishtar_common/libreoffice.py
+++ b/ishtar_common/libreoffice.py
@@ -9,6 +9,7 @@ from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
from com.sun.star.sheet.ValidationType import LIST
from com.sun.star.table import BorderLineStyle
+from com.sun.star.uno import RuntimeException
# nosec: filename used is generated and sanitized
import subprocess # nosec
@@ -52,16 +53,15 @@ class UnoClient:
def connect(self):
local_context = uno.getComponentContext()
-
- resolver = local_context.ServiceManager.createInstanceWithContext(
- "com.sun.star.bridge.UnoUrlResolver", local_context)
- connection = get_connection()
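+        # UNO URL of a LibreOffice instance listening on a local TCP socket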
+        connection_str = (
+            f"uno:socket,host={settings.LIBREOFFICE_HOST},"
+            f"port={settings.LIBREOFFICE_PORT};"
+            "urp;StarOffice.ComponentContext"
+        )
try:
- self.service_manager = resolver.resolve(
- "uno:{};StarOffice.ServiceManager".format(connection))
- except NoConnectException:
+ self.context = local_context.ServiceManager.createInstanceWithContext(
+ "com.sun.star.bridge.UnoUrlResolver", local_context
+ ).resolve(connection_str)
+        except (NoConnectException, RuntimeException):
self.service_manager = None
- # self.service_manager = self.service_manager.ServiceManager
+ return
+ self.service_manager = self.context.ServiceManager
def create_context(self):
if self.remote_context and self.desktop:
diff --git a/ishtar_common/models_common.py b/ishtar_common/models_common.py
index 4b0f1b160..573d4f8bf 100644
--- a/ishtar_common/models_common.py
+++ b/ishtar_common/models_common.py
@@ -5192,9 +5192,7 @@ class IdentifierItem(models.Model):
# search cache clean
SearchCache = apps.get_model("ishtar_common", "SearchCache")
- SearchCache.objects.filter(
- content_type=ContentType.objects.get_for_model(self.__class__)
- ).delete()
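+        # invalidate every cached search count, not only this model's:
+        # identifier changes may affect searches on related models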
+ SearchCache.objects.all().delete()
def regenerate_all_ids(self, save=True):
if getattr(self, "_prevent_loop", False):
diff --git a/ishtar_common/views_item.py b/ishtar_common/views_item.py
index 12406669c..eb37c9d62 100644
--- a/ishtar_common/views_item.py
+++ b/ishtar_common/views_item.py
@@ -1599,6 +1599,18 @@ def _manage_hierarchic_fields(model, dct, and_reqs):
break
+def __dict_add(dct, key, value, merge_char=";"):
+    """
+    Create the dict entry if the key is missing.
+    Otherwise append the value, separated by merge_char.
+    """
+    if key in dct:
+        dct[key] += merge_char + value
+    else:
+        dct[key] = value
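+# e.g. __dict_add(dct, "name", "Foug") then __dict_add(dct, "name", "Jossel")
+# leaves dct["name"] == "Foug;Jossel"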
+
+
def _manage_clean_search_field(dct, exclude=None, reverse=False, related_name_fields=None):
related_names = related_name_fields if related_name_fields else []
for k in list(dct.keys()):
@@ -1609,24 +1621,35 @@ def _manage_clean_search_field(dct, exclude=None, reverse=False, related_name_fi
dct[k] = _clean_type_val(dct[k])
if "*" not in dct[k] or k.endswith("regex"):
continue
- value = dct.pop(k).strip()
+
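+        # a field may carry several ";"-separated search terms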
+ values = dct.pop(k).strip().split(";")
base_key = k[:]
if k.endswith("__iexact"):
base_key = k[:-len("__iexact")]
- if value == "*":
- if k in related_names or not reverse:
- dct[base_key + "__isnull"] = False
- if exclude is not None and k.endswith("__iexact"):
+
+ for value in values:
+            # determine the search syntax used by each value
+ if "*" not in value:
+                # this value has no wildcard: keep it as-is
+ __dict_add(dct, k, value)
+ continue
+
+ if value == "*":
+                # a bare "*" searches for presence (or absence) of a value
+ if k in related_names or not reverse:
+ dct[base_key + "__isnull"] = False
+ if exclude is not None and k.endswith("__iexact"):
+ exclude[base_key + "__exact"] = ""
+ continue
+
+ if value.startswith("*"):
+ value = value[1:]
+ if value.endswith("*"):
+ value = value[:-1]
+ if value:
+ __dict_add(dct, base_key + "__icontains", value)
+ elif exclude is not None:
exclude[base_key + "__exact"] = ""
- continue
- if value.startswith("*"):
- value = value[1:]
- if value.endswith("*"):
- value = value[:-1]
- if value:
- dct[base_key + "__icontains"] = value
- elif exclude is not None:
- exclude[base_key + "__exact"] = ""
def _get_relation_type_dict(my_relation_types_prefix, dct):
@@ -2226,60 +2249,14 @@ def _get_table_cols(request, data_type, own_table_cols, full, model):
filtered_table_cols.append(col_name)
return filtered_table_cols
-AND = " && "
-OR = " || "
-RE_GROUPS = re.compile(r"(.*?)\[\[ (.*) \]\](.*)")
-
-def _parse_operator_query_string(query):
- start_operator = "OR"
- if query.startswith(AND):
- start_operator = "AND"
- query = query[4:]
- if query.startswith(OR):
- query = query[4:]
- end_operator = "OR"
- if query.endswith(OR):
- end_operator = AND
- query = query[:-4]
- if query.endswith(OR):
- query = query[:-4]
- # parse query...
- return (start_operator, query, end_operator)
-
-
-def _parse_logic_query_string(query):
- current_group = []
- m = RE_GROUPS.match(query)
- if not m:
- return (query, None)
- start, middle, end = m.groups()
- end_operator = "OR"
- if start:
- start_operator, query, end_operator = _parse_operator_query_string(
- start)
- current_group.append((start_operator, query))
- current_group.append((end_operator, _parse_logic_query_string(middle)))
- end_operator = "OR"
- if end:
- start_operator, query, end_operator = _parse_operator_query_string(
- start)
- current_group.append((start_operator, query))
- return (end_operator, current_group)
-
def split_dict(dct):
if not dct.get("search_vector", None):
return [("OR", dct)]
-
new_dcts = []
- query = dct["search_vector"]
- new_dcts = _parse_logic_query_string(query, new_dct)
-
-
# TODO: manage || and && syntax in the same query
# example: to extract [[]] parenthesis re.findall(r"(.*)\[\[ (.*?) \]\](.*)", s)
-
split_key, split_type = " || ", "OR"
if " && " in dct["search_vector"]:
split_key, split_type = " && ", "AND"
@@ -2746,6 +2723,7 @@ def get_item(
and_reqs.append(q)
# translate submited (and default) parameters to dict and queries
+    submitted_search = ""  # used by cache
for k in request_keys:
val = request_items.get(k)
if not val:
@@ -2761,6 +2739,7 @@ def get_item(
pass
req_keys = request_keys[k]
+        submitted_search += f'{k}="{val}"'
target = dct
if k in query_parameters:
if query_parameters[k].distinct_query:
@@ -2968,29 +2947,33 @@ def get_item(
search_vector = request_items.get("search_vector", "").strip()
# cache only for GUI search
-    cache_search = search_vector or pinned_search or any(
-        1 for k in request_items if k.startswith("columns["))
-    q_cached_count = None
+    cache_search = not selected_ids and (
+        submitted_search or search_vector or pinned_search or any(
+            1 for k in request_items if k.startswith("columns["))
+    )
+    items_nb = 0
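+    # cache_search is False for explicit ID selections: those bypass the cache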
if cache_search:
q_cached_count_attrs = {
"content_type": ContentType.objects.get_for_model(model),
- "query": search_vector or pinned_search or "",
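+        # the cache key combines the free-text query and the structured search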
+        "query": (search_vector or pinned_search or "") + submitted_search,
"updated__gt": timezone.now() - datetime.timedelta(hours=24)
}
if own:
q_cached_count_attrs["ishtar_user_id"] = ishtaruser.user_ptr_id
SearchCache = apps.get_model("ishtar_common", "SearchCache")
q_cached_count = SearchCache.objects.filter(**q_cached_count_attrs)
-    if cache_search and q_cached_count.exists():
-        items_nb = q_cached_count.all()[0].count
-    else:
+        if q_cached_count.exists():
+            items_nb = q_cached_count.all()[0].count
+    if items_nb < 1000:
+        # do not use the cache when the count is below 1000:
+        # the search can be made again
try:
q = items.values(*count_values)
items_nb = q.count() or 0
except ProgrammingError:
items_nb = 0
- if cache_search:
+ if cache_search and items_nb >= 1000:
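+        # persist the count only for large result sets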
q_cached_count_attrs.pop("updated__gt")
q_cached_count_attrs["count"] = items_nb
SearchCache.objects.create(**q_cached_count_attrs)