[mod] activate pyright checks (in CI)

We have been using a static type checker (pyright) for a long time, but passing
its check was not yet a prerequisite for passing the quality gate.  The check
did run in the CI, but its error messages were only logged.

As is always the case in life: checks that you have to run but that have no
consequences end up being neglected :-)

We didn't activate the checks back then because we had (and still have) too
much monkey patching in our code (not only in the engines; httpx and other
objects are affected as well).

We have wanted to replace monkey patching with clear interfaces for a long
time.  The basis for this is more complete typing, and we can only achieve that
if we make type checking an integral part of the quality gate.
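
A minimal sketch of that direction (illustrative names only, not the actual
SearXNG code): an attribute that is stuck onto a module at runtime is invisible
to pyright, while the same dependency expressed as a typed interface can be
checked at every access.

    import logging
    import types
    from dataclasses import dataclass, field

    # monkey-patching style: the attribute exists only at runtime, so a
    # static type checker cannot see it on the module
    engine = types.ModuleType("hypothetical_engine")
    engine.logger = logging.getLogger("hypothetical_engine")  # type: ignore

    # interface style: the dependency is declared, so pyright can check
    # every access without suppression markers
    @dataclass
    class EngineContext:
        logger: logging.Logger = field(
            default_factory=lambda: logging.getLogger("hypothetical_engine")
        )

    EngineContext().logger.info("typed access, no ignore marker needed")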

  This PR activates the type check; in order to pass the check, a few type
  annotations were corrected in the code, but most type inconsistencies were
  suppressed via inline comments.

This was particularly necessary in places where the code relies on attributes
that are attached to objects at runtime (monkey patching).  These attributes
are attached in only a few places, but they are accessed throughout the entire
code base, which is why there are many `# type: ignore` markers in the code ...
markers we will hopefully be able to remove again, step by step, in the future.
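
At a typical access site this looks roughly like the following (a hedged
sketch with hypothetical names; the real code accesses attributes such as the
engine `traits` or the response objects returned by `searx.network`): the
declared type is broader than what the call site actually receives, and the
quick fix used here is an inline suppression rather than a proper interface.

    from typing import Optional

    def fetch() -> Optional[str]:
        # hypothetical helper whose declared return type is wider than what
        # callers actually get back
        return "response body"

    body = fetch()
    # pyright flags `body.upper()` because `body` may be None; this commit
    # silences such findings with an inline marker ...
    print(body.upper())  # type: ignore

    # ... while the long-term fix is to narrow the type instead:
    if body is not None:
        print(body.upper())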

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2024-03-11 18:17:56 +01:00
parent 648f43be1d
commit 86b4d2f2d0
100 changed files with 236 additions and 426 deletions


@ -1,2 +1,2 @@
python 3.12.0 python 3.8.18
shellcheck 0.9.0 shellcheck 0.9.0


@ -69,7 +69,7 @@ test.shell:
utils/searx.sh \ utils/searx.sh \
utils/filtron.sh \ utils/filtron.sh \
utils/morty.sh utils/morty.sh
$(Q)$(MTOOLS) build_msg TEST "$@ OK" $(Q)$(MTOOLS) build_msg TEST "[shellcheck] $@ OK"
# wrap ./manage script # wrap ./manage script


@ -1,7 +1,7 @@
{ {
"dependencies": { "dependencies": {
"eslint": "^9.0.0", "eslint": "^9.0.0",
"pyright": "^1.1.329" "pyright": "^1.1.353"
}, },
"scripts": { "scripts": {
"clean": "rm -Rf node_modules package-lock.json" "clean": "rm -Rf node_modules package-lock.json"


@ -1,10 +0,0 @@
{
"venvPath": "local",
"venv": "py3",
"include": [
"searx",
"searxng_extra",
"tests"
],
"typeCheckingMode": "off"
}


@ -1,9 +1,13 @@
{ {
"venvPath": "local", "venvPath": "local",
"venv": "py3", "venv": "py3",
"stubPath": "searx/engines/__builtins__.pyi",
"include": [ "include": [
"searx", "searx",
"searxng_extra", "searxng_extra",
"tests" "tests"
] ],
"reportPossiblyUnboundVariable": false,
"reportArgumentType": false,
"reportOptionalMemberAccess": false
} }


@ -47,8 +47,8 @@ def brave(query, _lang):
results = [] results = []
if resp.ok: if resp.ok: # type: ignore
data = resp.json() data = resp.json() # type: ignore
for item in data[1]: for item in data[1]:
results.append(item) results.append(item)
return results return results
@ -62,8 +62,8 @@ def dbpedia(query, _lang):
results = [] results = []
if response.ok: if response.ok: # type: ignore
dom = lxml.etree.fromstring(response.content) dom = lxml.etree.fromstring(response.content) # type: ignore
results = dom.xpath('//Result/Label//text()') results = dom.xpath('//Result/Label//text()')
return results return results
@ -82,8 +82,8 @@ def duckduckgo(query, sxng_locale):
resp = get(url) resp = get(url)
ret_val = [] ret_val = []
if resp.ok: if resp.ok: # type: ignore
j = resp.json() j = resp.json() # type: ignore
if len(j) > 1: if len(j) > 1:
ret_val = j[1] ret_val = j[1]
return ret_val return ret_val
@ -110,11 +110,11 @@ def google_complete(query, sxng_locale):
) )
results = [] results = []
resp = get(url.format(subdomain=google_info['subdomain'], args=args)) resp = get(url.format(subdomain=google_info['subdomain'], args=args))
if resp.ok: if resp.ok: # type: ignore
json_txt = resp.text[resp.text.find('[') : resp.text.find(']', -3) + 1] json_txt = resp.text[resp.text.find('[') : resp.text.find(']', -3) + 1] # type: ignore
data = json.loads(json_txt) data = json.loads(json_txt)
for item in data[0]: for item in data[0]:
results.append(lxml.html.fromstring(item[0]).text_content()) results.append(lxml.html.fromstring(item[0]).text_content()) # type: ignore
return results return results
@ -124,7 +124,7 @@ def mwmbl(query, _lang):
# mwmbl autocompleter # mwmbl autocompleter
url = 'https://api.mwmbl.org/search/complete?{query}' url = 'https://api.mwmbl.org/search/complete?{query}'
results = get(url.format(query=urlencode({'q': query}))).json()[1] results = get(url.format(query=urlencode({'q': query}))).json()[1] # type: ignore
# results starting with `go:` are direct urls and not useful for auto completion # results starting with `go:` are direct urls and not useful for auto completion
return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")] return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]
@ -142,10 +142,10 @@ def seznam(query, _lang):
) )
) )
if not resp.ok: if not resp.ok: # type: ignore
return [] return []
data = resp.json() data = resp.json() # type: ignore
return [ return [
''.join([part.get('text', '') for part in item.get('text', [])]) ''.join([part.get('text', '') for part in item.get('text', [])])
for item in data.get('result', []) for item in data.get('result', [])
@ -159,10 +159,10 @@ def stract(query, _lang):
resp = post(url) resp = post(url)
if not resp.ok: if not resp.ok: # type: ignore
return [] return []
return [suggestion['raw'] for suggestion in resp.json()] return [suggestion['raw'] for suggestion in resp.json()] # type: ignore
def startpage(query, sxng_locale): def startpage(query, sxng_locale):
@ -170,7 +170,7 @@ def startpage(query, sxng_locale):
lui = engines['startpage'].traits.get_language(sxng_locale, 'english') lui = engines['startpage'].traits.get_language(sxng_locale, 'english')
url = 'https://startpage.com/suggestions?{query}' url = 'https://startpage.com/suggestions?{query}'
resp = get(url.format(query=urlencode({'q': query, 'segment': 'startpage.udog', 'lui': lui}))) resp = get(url.format(query=urlencode({'q': query, 'segment': 'startpage.udog', 'lui': lui})))
data = resp.json() data = resp.json() # type: ignore
return [e['text'] for e in data.get('suggestions', []) if 'text' in e] return [e['text'] for e in data.get('suggestions', []) if 'text' in e]
@ -178,7 +178,7 @@ def swisscows(query, _lang):
# swisscows autocompleter # swisscows autocompleter
url = 'https://swisscows.ch/api/suggest?{query}&itemsCount=5' url = 'https://swisscows.ch/api/suggest?{query}&itemsCount=5'
resp = json.loads(get(url.format(query=urlencode({'query': query}))).text) resp = json.loads(get(url.format(query=urlencode({'query': query}))).text) # type: ignore
return resp return resp
@ -190,8 +190,8 @@ def qwant(query, sxng_locale):
url = 'https://api.qwant.com/v3/suggest?{query}' url = 'https://api.qwant.com/v3/suggest?{query}'
resp = get(url.format(query=urlencode({'q': query, 'locale': locale, 'version': '2'}))) resp = get(url.format(query=urlencode({'q': query, 'locale': locale, 'version': '2'})))
if resp.ok: if resp.ok: # type: ignore
data = resp.json() data = resp.json() # type: ignore
if data['status'] == 'success': if data['status'] == 'success':
for item in data['data']['items']: for item in data['data']['items']:
results.append(item['value']) results.append(item['value'])
@ -204,7 +204,7 @@ def wikipedia(query, sxng_locale):
results = [] results = []
eng_traits = engines['wikipedia'].traits eng_traits = engines['wikipedia'].traits
wiki_lang = eng_traits.get_language(sxng_locale, 'en') wiki_lang = eng_traits.get_language(sxng_locale, 'en')
wiki_netloc = eng_traits.custom['wiki_netloc'].get(wiki_lang, 'en.wikipedia.org') wiki_netloc = eng_traits.custom['wiki_netloc'].get(wiki_lang, 'en.wikipedia.org') # type: ignore
url = 'https://{wiki_netloc}/w/api.php?{args}' url = 'https://{wiki_netloc}/w/api.php?{args}'
args = urlencode( args = urlencode(
@ -218,8 +218,8 @@ def wikipedia(query, sxng_locale):
} }
) )
resp = get(url.format(args=args, wiki_netloc=wiki_netloc)) resp = get(url.format(args=args, wiki_netloc=wiki_netloc))
if resp.ok: if resp.ok: # type: ignore
data = resp.json() data = resp.json() # type: ignore
if len(data) > 1: if len(data) > 1:
results = data[1] results = data[1]
@ -230,7 +230,7 @@ def yandex(query, _lang):
# yandex autocompleter # yandex autocompleter
url = "https://suggest.yandex.com/suggest-ff.cgi?{0}" url = "https://suggest.yandex.com/suggest-ff.cgi?{0}"
resp = json.loads(get(url.format(urlencode(dict(part=query)))).text) resp = json.loads(get(url.format(urlencode(dict(part=query)))).text) # type: ignore
if len(resp) > 1: if len(resp) > 1:
return resp[1] return resp[1]
return [] return []


@ -147,7 +147,7 @@ def get_token() -> str:
return '12345678' return '12345678'
token = redis_client.get(TOKEN_KEY) token = redis_client.get(TOKEN_KEY)
if token: if token:
token = token.decode('UTF-8') token = token.decode('UTF-8') # type: ignore
else: else:
token = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16)) token = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16))
redis_client.set(TOKEN_KEY, token, ex=TOKEN_LIVE_TIME) redis_client.set(TOKEN_KEY, token, ex=TOKEN_LIVE_TIME)


@ -13,7 +13,7 @@
from __future__ import annotations from __future__ import annotations
from typing import List, Callable, TYPE_CHECKING from typing import List, TYPE_CHECKING, Callable
if TYPE_CHECKING: if TYPE_CHECKING:
from searx.enginelib import traits from searx.enginelib import traits
@ -76,7 +76,7 @@ class Engine: # pylint: disable=too-few-public-methods
# settings.yml # settings.yml
categories: List[str] categories: list[str]
"""Specifies to which :ref:`engine categories` the engine should be added.""" """Specifies to which :ref:`engine categories` the engine should be added."""
name: str name: str
@ -139,6 +139,6 @@ class Engine: # pylint: disable=too-few-public-methods
the user is used to build and send a ``Accept-Language`` header in the the user is used to build and send a ``Accept-Language`` header in the
request to the origin search engine.""" request to the origin search engine."""
tokens: List[str] tokens: list[str]
"""A list of secret tokens to make this engine *private*, more details see """A list of secret tokens to make this engine *private*, more details see
:ref:`private engines`.""" :ref:`private engines`."""


@ -10,10 +10,12 @@ used.
""" """
from __future__ import annotations from __future__ import annotations
from collections.abc import Callable
import json import json
import dataclasses import dataclasses
import types import types
from typing import Dict, Literal, Iterable, Union, Callable, Optional, TYPE_CHECKING from typing import Dict, Literal, Iterable, Union, Optional, TYPE_CHECKING
from searx import locales from searx import locales
from searx.data import data_dir, ENGINE_TRAITS from searx.data import data_dir, ENGINE_TRAITS


@ -41,7 +41,7 @@ def response(resp):
seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]')) seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]')) leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()')) filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
filesize, filesize_multiplier = filesize_info.split() filesize, filesize_multiplier = filesize_info.split() # type: ignore
filesize = get_torrent_size(filesize, filesize_multiplier) filesize = get_torrent_size(filesize, filesize_multiplier)
results.append( results.append(


@ -0,0 +1,18 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=missing-module-docstring
# Ugly hack to avoid errors from pyright when checking the engines / sadly
# these *builtins* are now available in all modules !?!
#
# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md
import searx
import searx.enginelib.traits
logger = searx.logger
traits = searx.enginelib.traits.EngineTraits()
supported_languages = None
language_aliases = None
categories = []
del searx


@ -14,16 +14,14 @@ import sys
import copy import copy
from os.path import realpath, dirname from os.path import realpath, dirname
from typing import TYPE_CHECKING, Dict from typing import Dict
import types import types
import inspect import inspect
from searx import logger, settings from searx import logger, settings
from searx.enginelib import Engine
from searx.utils import load_module from searx.utils import load_module
if TYPE_CHECKING:
from searx.enginelib import Engine
logger = logger.getChild('engines') logger = logger.getChild('engines')
ENGINE_DIR = dirname(realpath(__file__)) ENGINE_DIR = dirname(realpath(__file__))
ENGINE_DEFAULT_ARGS = { ENGINE_DEFAULT_ARGS = {


@ -51,7 +51,7 @@ def response(resp):
link = eval_xpath_getindex(result, './/h5/a', 0) link = eval_xpath_getindex(result, './/h5/a', 0)
url = base_url + link.attrib.get('href') + '#downloads' url = base_url + link.attrib.get('href') + '#downloads' # type: ignore
title = extract_text(link) title = extract_text(link)
img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0) img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0)
res = {'url': url, 'title': title, 'img_src': img_src} res = {'url': url, 'title': title, 'img_src': img_src}


@ -8,7 +8,6 @@ Arch Wiki blocks access to it.
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode, urljoin, urlparse from urllib.parse import urlencode, urljoin, urlparse
import lxml import lxml
import babel import babel
@ -17,13 +16,6 @@ from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
from searx.locales import language_tag from searx.locales import language_tag
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
"website": 'https://wiki.archlinux.org/', "website": 'https://wiki.archlinux.org/',


@ -5,7 +5,7 @@
from datetime import datetime from datetime import datetime
from lxml import etree from lxml import etree # type: ignore
from lxml.etree import XPath from lxml.etree import XPath
from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex


@ -2,7 +2,7 @@
"""Ask.com""" """Ask.com"""
from urllib.parse import urlencode from urllib.parse import urlencode
import dateutil import dateutil.parser
from lxml import html from lxml import html
from searx import utils from searx import utils


@ -6,7 +6,7 @@ from datetime import datetime
import re import re
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import etree from lxml import etree # type: ignore
from searx.utils import searx_useragent from searx.utils import searx_useragent
# about # about


@ -26,7 +26,6 @@ category for the Chinese market.
""" """
# pylint: disable=too-many-branches, invalid-name # pylint: disable=too-many-branches, invalid-name
from typing import TYPE_CHECKING
import base64 import base64
import re import re
import time import time
@ -39,12 +38,6 @@ from searx.utils import eval_xpath, extract_text, eval_xpath_list, eval_xpath_ge
from searx.locales import language_tag, region_tag from searx.locales import language_tag, region_tag
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger = logging.getLogger()
traits: EngineTraits
about = { about = {
"website": 'https://www.bing.com', "website": 'https://www.bing.com',


@ -4,7 +4,6 @@
# pylint: disable=invalid-name # pylint: disable=invalid-name
from typing import TYPE_CHECKING
import json import json
from urllib.parse import urlencode from urllib.parse import urlencode
@ -15,11 +14,6 @@ from searx.engines.bing import set_bing_cookies
from searx.engines.bing import fetch_traits # pylint: disable=unused-import from searx.engines.bing import fetch_traits # pylint: disable=unused-import
if TYPE_CHECKING:
import logging
logger = logging.getLogger()
traits: EngineTraits traits: EngineTraits
# about # about


@ -9,7 +9,6 @@
# pylint: disable=invalid-name # pylint: disable=invalid-name
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import html from lxml import html
@ -18,13 +17,6 @@ from searx.utils import eval_xpath, extract_text, eval_xpath_list, eval_xpath_ge
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
from searx.engines.bing import set_bing_cookies from searx.engines.bing import set_bing_cookies
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about # about
about = { about = {


@ -3,24 +3,15 @@
"""Bing-Videos: description see :py:obj:`searx.engines.bing`. """Bing-Videos: description see :py:obj:`searx.engines.bing`.
""" """
from typing import TYPE_CHECKING
import json import json
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import html from lxml import html
from searx.enginelib.traits import EngineTraits
from searx.engines.bing import set_bing_cookies from searx.engines.bing import set_bing_cookies
from searx.engines.bing import fetch_traits # pylint: disable=unused-import from searx.engines.bing import fetch_traits # pylint: disable=unused-import
from searx.engines.bing_images import time_map from searx.engines.bing_images import time_map
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
"website": 'https://www.bing.com/videos', "website": 'https://www.bing.com/videos',


@ -118,7 +118,7 @@ Implementations
""" """
from typing import Any, TYPE_CHECKING from typing import Any
from urllib.parse import ( from urllib.parse import (
urlencode, urlencode,
@ -139,13 +139,6 @@ from searx.utils import (
) )
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
"website": 'https://search.brave.com/', "website": 'https://search.brave.com/',
"wikidata_id": 'Q22906900', "wikidata_id": 'Q22906900',
@ -228,10 +221,10 @@ def request(query, params):
params['cookies']['useLocation'] = '0' params['cookies']['useLocation'] = '0'
params['cookies']['summarizer'] = '0' params['cookies']['summarizer'] = '0'
engine_region = traits.get_region(params['searxng_locale'], 'all') engine_region = traits.get_region(params['searxng_locale'], 'all') # type: ignore
params['cookies']['country'] = engine_region.split('-')[-1].lower() # type: ignore params['cookies']['country'] = engine_region.split('-')[-1].lower() # type: ignore
ui_lang = locales.get_engine_locale(params['searxng_locale'], traits.custom["ui_lang"], 'en-us') ui_lang = locales.get_engine_locale(params['searxng_locale'], traits.custom["ui_lang"], 'en-us') # type: ignore
params['cookies']['ui_lang'] = ui_lang params['cookies']['ui_lang'] = ui_lang
logger.debug("cookies %s", params['cookies']) logger.debug("cookies %s", params['cookies'])


@ -40,7 +40,7 @@ import re
from datetime import datetime from datetime import datetime
from urllib.parse import quote from urllib.parse import quote
from lxml import etree from lxml import etree # type: ignore
from searx.utils import get_torrent_size from searx.utils import get_torrent_size


@ -56,7 +56,7 @@ def response(resp):
content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False) content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False)
# it is better to emit <br/> instead of |, but html tags are verboten # it is better to emit <br/> instead of |, but html tags are verboten
content = content.strip().replace('\n', ' | ') content = content.strip().replace('\n', ' | ')
content = ' '.join(content.split()) content = ' '.join(content.split()) # type: ignore
filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0] filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0]
filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1] filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1]


@ -10,8 +10,6 @@ Dailymotion (Videos)
""" """
from typing import TYPE_CHECKING
from datetime import datetime, timedelta from datetime import datetime, timedelta
from urllib.parse import urlencode from urllib.parse import urlencode
import time import time
@ -23,14 +21,7 @@ from searx.exceptions import SearxEngineAPIException
from searx.locales import region_tag, language_tag from searx.locales import region_tag, language_tag
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": 'https://www.dailymotion.com', "website": 'https://www.dailymotion.com',
"wikidata_id": 'Q769222', "wikidata_id": 'Q769222',


@ -57,10 +57,10 @@ def response(resp):
results.append( results.append(
{ {
'url': base_url + "/" + extract_text(eval_xpath(result, url_xpath)), 'url': base_url + "/" + extract_text(eval_xpath(result, url_xpath)), # type: ignore
'title': extract_text(eval_xpath(result, title_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)),
'content': extract_text(eval_xpath(result, content_xpath)), 'content': extract_text(eval_xpath(result, content_xpath)),
'metadata': ', '.join(metadata), 'metadata': ', '.join(metadata), # type: ignore
} }
) )


@ -4,7 +4,6 @@ DuckDuckGo Lite
~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~
""" """
from typing import TYPE_CHECKING
import re import re
from urllib.parse import urlencode from urllib.parse import urlencode
import json import json
@ -25,13 +24,6 @@ from searx.network import get # see https://github.com/searxng/searxng/issues/7
from searx import redisdb from searx import redisdb
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
"website": 'https://lite.duckduckgo.com/lite/', "website": 'https://lite.duckduckgo.com/lite/',
"wikidata_id": 'Q12805', "wikidata_id": 'Q12805',
@ -110,7 +102,7 @@ def get_vqd(query):
key = 'SearXNG_ddg_web_vqd' + redislib.secret_hash(query) key = 'SearXNG_ddg_web_vqd' + redislib.secret_hash(query)
value = c.get(key) value = c.get(key)
if value or value == b'': if value or value == b'':
value = value.decode('utf-8') value = value.decode('utf-8') # type: ignore
logger.debug("re-use cached vqd value: %s", value) logger.debug("re-use cached vqd value: %s", value)
return value return value
@ -129,7 +121,7 @@ def get_vqd(query):
return value return value
def get_ddg_lang(eng_traits: EngineTraits, sxng_locale, default='en_US'): def get_ddg_lang(eng_traits: EngineTraits, sxng_locale, default: str = 'en_US') -> str:
"""Get DuckDuckGo's language identifier from SearXNG's locale. """Get DuckDuckGo's language identifier from SearXNG's locale.
DuckDuckGo defines its languages by region codes (see DuckDuckGo defines its languages by region codes (see


@ -13,8 +13,6 @@ most of the features are based on English terms.
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode, urlparse, urljoin from urllib.parse import urlencode, urlparse, urljoin
from lxml import html from lxml import html
@ -22,12 +20,7 @@ from searx.data import WIKIDATA_UNITS
from searx.utils import extract_text, html_to_text, get_string_replaces_function from searx.utils import extract_text, html_to_text, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
if TYPE_CHECKING:
import logging
logger: logging.Logger
# about
about = { about = {
"website": 'https://duckduckgo.com/', "website": 'https://duckduckgo.com/',
"wikidata_id": 'Q12805', "wikidata_id": 'Q12805',


@ -5,7 +5,6 @@ DuckDuckGo Extra (images, videos, news)
""" """
from datetime import datetime from datetime import datetime
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
from searx.engines.duckduckgo import fetch_traits # pylint: disable=unused-import from searx.engines.duckduckgo import fetch_traits # pylint: disable=unused-import
@ -13,16 +12,8 @@ from searx.engines.duckduckgo import (
get_ddg_lang, get_ddg_lang,
get_vqd, get_vqd,
) )
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": 'https://duckduckgo.com/', "website": 'https://duckduckgo.com/',
"wikidata_id": 'Q12805', "wikidata_id": 'Q12805',


@ -4,7 +4,6 @@ DuckDuckGo Weather
~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~
""" """
from typing import TYPE_CHECKING
from json import loads from json import loads
from urllib.parse import quote from urllib.parse import quote
@ -13,15 +12,6 @@ from flask_babel import gettext
from searx.engines.duckduckgo import fetch_traits # pylint: disable=unused-import from searx.engines.duckduckgo import fetch_traits # pylint: disable=unused-import
from searx.engines.duckduckgo import get_ddg_lang from searx.engines.duckduckgo import get_ddg_lang
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
"website": 'https://duckduckgo.com/', "website": 'https://duckduckgo.com/',


@ -3,20 +3,13 @@
""" """
from typing import TYPE_CHECKING
import json import json
from time import time from time import time
import re import re
from urllib.parse import urlencode from urllib.parse import urlencode
from searx.utils import ecma_unescape, html_to_text from searx.utils import ecma_unescape, html_to_text
if TYPE_CHECKING:
import logging
logger: logging.Logger
# about
about = { about = {
"website": 'https://www.flickr.com', "website": 'https://www.flickr.com',
"wikidata_id": 'Q103204', "wikidata_id": 'Q103204',


@ -46,7 +46,7 @@ def response(resp):
for result in eval_xpath_list(dom, results_xpath): for result in eval_xpath_list(dom, results_xpath):
results.append( results.append(
{ {
'url': base_url + extract_text(eval_xpath(result, url_xpath)), 'url': base_url + extract_text(eval_xpath(result, url_xpath)), # type: ignore
'title': extract_text(eval_xpath(result, title_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)),
'img_src': extract_text(eval_xpath(result, thumbnail_xpath)), 'img_src': extract_text(eval_xpath(result, thumbnail_xpath)),
'content': extract_text(eval_xpath(result, info_text_xpath)), 'content': extract_text(eval_xpath(result, info_text_xpath)),


@ -11,8 +11,6 @@ engines:
""" """
from typing import TYPE_CHECKING
import re import re
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import html from lxml import html
@ -26,14 +24,6 @@ from searx.network import get # see https://github.com/searxng/searxng/issues/7
from searx.exceptions import SearxEngineCaptchaException from searx.exceptions import SearxEngineCaptchaException
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about # about
about = { about = {
"website": 'https://www.google.com', "website": 'https://www.google.com',


@ -13,8 +13,6 @@ This internal API offer results in
.. _Protobuf: https://en.wikipedia.org/wiki/Protocol_Buffers .. _Protobuf: https://en.wikipedia.org/wiki/Protocol_Buffers
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
from json import loads from json import loads
@ -25,15 +23,7 @@ from searx.engines.google import (
detect_google_sorry, detect_google_sorry,
) )
if TYPE_CHECKING:
import logging
from searx.enginelib.traits import EngineTraits
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": 'https://images.google.com', "website": 'https://images.google.com',
"wikidata_id": 'Q521550', "wikidata_id": 'Q521550',


@ -24,8 +24,6 @@ The google news API ignores some parameters from the common :ref:`google API`:
.. _save: https://developers.google.com/custom-search/docs/xml_results#safesp .. _save: https://developers.google.com/custom-search/docs/xml_results#safesp
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
import base64 import base64
from lxml import html from lxml import html
@ -46,13 +44,6 @@ from searx.engines.google import (
) )
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about # about
about = { about = {
"website": 'https://news.google.com', "website": 'https://news.google.com',
@ -301,4 +292,4 @@ def fetch_traits(engine_traits: EngineTraits):
print("ERROR: %s -> %s is unknown by babel" % (ceid, sxng_locale)) print("ERROR: %s -> %s is unknown by babel" % (ceid, sxng_locale))
continue continue
engine_traits.custom['ceid'][locales.region_tag(locale)] = ceid engine_traits.custom['ceid'][locales.region_tag(locale)] = ceid # type: ignore


@ -7,7 +7,6 @@ can make use of the :ref:`google API` to assemble the arguments of the GET
request. request.
""" """
from typing import TYPE_CHECKING
from typing import Optional from typing import Optional
from urllib.parse import urlencode from urllib.parse import urlencode
@ -28,14 +27,6 @@ from searx.engines.google import (
get_google_info, get_google_info,
time_range_dict, time_range_dict,
) )
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about # about
about = { about = {


@ -13,8 +13,6 @@
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import html from lxml import html
@ -33,14 +31,6 @@ from searx.engines.google import (
suggestion_xpath, suggestion_xpath,
detect_google_sorry, detect_google_sorry,
) )
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about # about
about = { about = {


@ -55,7 +55,7 @@ def response(resp):
results.append( results.append(
{ {
'template': 'images.html', 'template': 'images.html',
'url': base_url + extract_text(eval_xpath(result, url_xpath)), 'url': base_url + extract_text(eval_xpath(result, url_xpath)), # type: ignore
'title': extract_text(eval_xpath(result, title_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)),
'img_src': img_src, 'img_src': img_src,
'thumbnail_src': thumbnail_src, 'thumbnail_src': thumbnail_src,


@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later # SPDX-License-Identifier: AGPL-3.0-or-later
""" """INA (Videos)
INA (Videos)
""" """
from html import unescape from html import unescape
@ -58,7 +58,7 @@ def response(resp):
thumbnail = extract_text(eval_xpath(result, thumbnail_xpath)) thumbnail = extract_text(eval_xpath(result, thumbnail_xpath))
content = extract_text(eval_xpath(result, publishedDate_xpath)) + extract_text( content = extract_text(eval_xpath(result, publishedDate_xpath)) + extract_text(
eval_xpath(result, content_xpath) eval_xpath(result, content_xpath)
) ) # type: ignore
# append result # append result
results.append( results.append(


@ -17,7 +17,7 @@ from urllib.parse import urlencode
from searx.utils import to_string, html_to_text from searx.utils import to_string, html_to_text
search_url = None search_url: str = ''
url_query = None url_query = None
url_prefix = "" url_prefix = ""
content_query = None content_query = None


@ -33,22 +33,13 @@ Implementations
""" """
from __future__ import annotations from __future__ import annotations
from typing import TYPE_CHECKING
from datetime import datetime from datetime import datetime
from urllib.parse import urlencode, quote from urllib.parse import urlencode, quote
from searx.utils import html_to_text from searx.utils import html_to_text
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": None, "website": None,
"wikidata_id": None, "wikidata_id": None,


@ -20,7 +20,6 @@ Otherwise, follow instructions provided by Mullvad for enabling the VPN on Linux
update of SearXNG! update of SearXNG!
""" """
from typing import TYPE_CHECKING
from httpx import Response from httpx import Response
from lxml import html from lxml import html
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
@ -28,18 +27,10 @@ from searx.locales import region_tag, get_official_locales
from searx.utils import eval_xpath, extract_text, eval_xpath_list from searx.utils import eval_xpath, extract_text, eval_xpath_list
from searx.exceptions import SearxEngineResponseException from searx.exceptions import SearxEngineResponseException
if TYPE_CHECKING:
import logging
logger = logging.getLogger()
traits: EngineTraits
use_cache: bool = True # non-cache use only has 100 searches per day! use_cache: bool = True # non-cache use only has 100 searches per day!
search_url = "https://leta.mullvad.net" search_url = "https://leta.mullvad.net"
# about
about = { about = {
"website": search_url, "website": search_url,
"wikidata_id": 'Q47008412', # the Mullvad id - not leta, but related "wikidata_id": 'Q47008412', # the Mullvad id - not leta, but related
@ -145,7 +136,7 @@ def fetch_traits(engine_traits: EngineTraits):
if not isinstance(resp, Response): if not isinstance(resp, Response):
print("ERROR: failed to get response from mullvad-leta. Are you connected to the VPN?") print("ERROR: failed to get response from mullvad-leta. Are you connected to the VPN?")
return return
if not resp.ok: if not resp.ok: # type: ignore
print("ERROR: response from mullvad-leta is not OK. Are you connected to the VPN?") print("ERROR: response from mullvad-leta is not OK. Are you connected to the VPN?")
return return
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)


@ -14,8 +14,6 @@ from searx.network import get
from searx.locales import language_tag from searx.locales import language_tag
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
traits: EngineTraits
# Engine metadata # Engine metadata
about = { about = {
"website": "https://odysee.com/", "website": "https://odysee.com/",
@ -122,11 +120,11 @@ def fetch_traits(engine_traits: EngineTraits):
timeout=60, timeout=60,
) )
if not resp.ok: if not resp.ok: # type: ignore
print("ERROR: can't determine languages from Odysee") print("ERROR: can't determine languages from Odysee")
return return
for line in resp.text.split("\n")[1:-4]: for line in resp.text.split("\n")[1:-4]: # type: ignore
lang_tag = line.strip().split(": ")[0].replace("'", "") lang_tag = line.strip().split(": ")[0].replace("'", "")
try: try:


@ -17,8 +17,6 @@ from searx.locales import language_tag
from searx.utils import html_to_text from searx.utils import html_to_text
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
traits: EngineTraits
about = { about = {
# pylint: disable=line-too-long # pylint: disable=line-too-long
"website": 'https://joinpeertube.org', "website": 'https://joinpeertube.org',


@ -5,7 +5,7 @@ import re
from urllib.parse import urlencode from urllib.parse import urlencode
from dateutil import parser from dateutil import parser
import babel import babel.numbers
import flask_babel import flask_babel
from lxml import html from lxml import html
from searx.utils import eval_xpath, eval_xpath_list, extract_text from searx.utils import eval_xpath, eval_xpath_list, extract_text
@ -69,14 +69,18 @@ def response(resp):
results.append( results.append(
{ {
'template': 'packages.html', 'template': 'packages.html',
'url': base_url + extract_text(eval_xpath(result, url_xpath)), 'url': base_url + extract_text(eval_xpath(result, url_xpath)), # type: ignore
'title': extract_text(eval_xpath(result, title_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)),
'content': extract_text(eval_xpath(result, content_xpath)), 'content': extract_text(eval_xpath(result, content_xpath)),
'package_name': re.sub(r"\(|\)", "", extract_text(eval_xpath(result, package_name_xpath))), 'package_name': re.sub(
r"\(|\)",
"",
extract_text(eval_xpath(result, package_name_xpath)),
), # type: ignore
'version': extract_text(eval_xpath(result, version_xpath)), 'version': extract_text(eval_xpath(result, version_xpath)),
'popularity': popularity, 'popularity': popularity,
'license_name': extract_text(eval_xpath(result, license_name_xpath)), 'license_name': extract_text(eval_xpath(result, license_name_xpath)),
'license_url': base_url + extract_text(eval_xpath(result, license_url_xpath)), 'license_url': base_url + extract_text(eval_xpath(result, license_url_xpath)), # type: ignore
'publishedDate': publishedDate, 'publishedDate': publishedDate,
} }
) )


@ -63,7 +63,7 @@ def search(query, params):
query_params = {'query': query} query_params = {'query': query}
query_to_run = query_str + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit) query_to_run = query_str + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit)
with _connection: with _connection: # type: ignore
with _connection.cursor() as cur: with _connection.cursor() as cur:
cur.execute(query_to_run, query_params) cur.execute(query_to_run, query_params)
return _fetch_results(cur) return _fetch_results(cur)


@ -128,7 +128,7 @@ def _get_request_id(query, params):
# performs an IP-based geolocation of the user, we don't want that in # performs an IP-based geolocation of the user, we don't want that in
# SearXNG ;-) # SearXNG ;-)
if l.territory: if l and l.territory:
headers['Accept-Language'] = f"{l.language}-{l.territory},{l.language};" "q=0.9,*;" "q=0.5" headers['Accept-Language'] = f"{l.language}-{l.territory},{l.language};" "q=0.9,*;" "q=0.5"
resp_text = get(url, headers=headers).text # type: ignore resp_text = get(url, headers=headers).text # type: ignore


@ -6,7 +6,7 @@
from datetime import datetime from datetime import datetime
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import etree from lxml import etree # type: ignore
from searx.network import get from searx.network import get
from searx.utils import ( from searx.utils import (
eval_xpath_getindex, eval_xpath_getindex,
@ -77,8 +77,8 @@ def response(resp): # pylint: disable=too-many-locals
for entry in eval_xpath_list(search_results, '//PubmedArticle'): for entry in eval_xpath_list(search_results, '//PubmedArticle'):
medline = eval_xpath_getindex(entry, './MedlineCitation', 0) medline = eval_xpath_getindex(entry, './MedlineCitation', 0)
title = eval_xpath_getindex(medline, './/Article/ArticleTitle', 0).text title = eval_xpath_getindex(medline, './/Article/ArticleTitle', 0).text # type: ignore
pmid = eval_xpath_getindex(medline, './/PMID', 0).text pmid = eval_xpath_getindex(medline, './/PMID', 0).text # type: ignore
url = pubmed_url + pmid url = pubmed_url + pmid
content = extract_text( content = extract_text(
eval_xpath_getindex(medline, './/Abstract/AbstractText//text()', 0, default=None), allow_none=True eval_xpath_getindex(medline, './/Abstract/AbstractText//text()', 0, default=None), allow_none=True
@ -120,7 +120,7 @@ def response(resp): # pylint: disable=too-many-locals
day = eval_xpath_getindex(accepted_date, './Day', 0) day = eval_xpath_getindex(accepted_date, './Day', 0)
try: try:
publishedDate = datetime.strptime( publishedDate = datetime.strptime(
year.text + '-' + month.text + '-' + day.text, year.text + '-' + month.text + '-' + day.text, # type: ignore
'%Y-%m-%d', '%Y-%m-%d',
) )
res_dict['publishedDate'] = publishedDate res_dict['publishedDate'] = publishedDate


@ -47,7 +47,7 @@ from json import loads
from urllib.parse import urlencode from urllib.parse import urlencode
from flask_babel import gettext from flask_babel import gettext
import babel import babel
import lxml import lxml.html
from searx.exceptions import SearxEngineAPIException, SearxEngineTooManyRequestsException from searx.exceptions import SearxEngineAPIException, SearxEngineTooManyRequestsException
from searx.network import raise_for_httperror from searx.network import raise_for_httperror
@ -59,8 +59,6 @@ from searx.utils import (
extract_text, extract_text,
) )
traits: EngineTraits
# about # about
about = { about = {
"website": 'https://www.qwant.com/', "website": 'https://www.qwant.com/',


@ -66,9 +66,9 @@ paging = True
time_range_support = True time_range_support = True
# parameters from settings.yml # parameters from settings.yml
base_url = None base_url: str = ''
search_dir = '' search_dir = ''
mount_prefix = None mount_prefix: str = ''
dl_prefix = None dl_prefix = None
# embedded # embedded


@ -69,7 +69,7 @@ def search(query, _params):
ret = _redis_client.hgetall(query) ret = _redis_client.hgetall(query)
if ret: if ret:
ret['template'] = result_template ret['template'] = result_template # type: ignore
return [ret] return [ret]
if ' ' in query: if ' ' in query:
@ -98,7 +98,7 @@ def search_keys(query):
res = dict(enumerate(_redis_client.lrange(key, 0, -1))) res = dict(enumerate(_redis_client.lrange(key, 0, -1)))
if res: if res:
res['template'] = result_template res['template'] = result_template # type: ignore
res['redis_key'] = key res['redis_key'] = key # type: ignore
ret.append(res) ret.append(res)
return ret return ret


@ -55,7 +55,7 @@ def response(resp):
return [] return []
for result_dom in results_dom: for result_dom in results_dom:
url = base_url + extract_text(result_dom.xpath(url_xpath)) url = base_url + extract_text(result_dom.xpath(url_xpath)) # type: ignore
thumbnail = extract_text(result_dom.xpath(thumbnail_xpath)) thumbnail = extract_text(result_dom.xpath(thumbnail_xpath))
title = extract_text(result_dom.xpath(title_xpath)) title = extract_text(result_dom.xpath(title_xpath))
p_date = extract_text(result_dom.xpath(published_date)) p_date = extract_text(result_dom.xpath(published_date))


@ -5,8 +5,6 @@ peertube engines.
""" """
from typing import TYPE_CHECKING
from urllib.parse import urlencode from urllib.parse import urlencode
from datetime import datetime from datetime import datetime
@ -17,14 +15,6 @@ from searx.engines.peertube import (
safesearch_table, safesearch_table,
time_range_table, time_range_table,
) )
from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
about = { about = {
# pylint: disable=line-too-long # pylint: disable=line-too-long


@ -44,7 +44,7 @@ guest_client_id = ''
def get_client_id(): def get_client_id():
resp = http_get("https://soundcloud.com") resp = http_get("https://soundcloud.com")
if resp.ok: if resp.ok: # type: ignore
tree = html.fromstring(resp.content) tree = html.fromstring(resp.content)
# script_tags has been moved from /assets/app/ to /assets/ path. I # script_tags has been moved from /assets/app/ to /assets/ path. I
# found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js # found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js
@ -55,7 +55,7 @@ def get_client_id():
for app_js_url in app_js_urls[::-1]: for app_js_url in app_js_urls[::-1]:
# gets app_js and searches for the clientid # gets app_js and searches for the clientid
resp = http_get(app_js_url) resp = http_get(app_js_url)
if resp.ok: if resp.ok: # type: ignore
cids = cid_re.search(resp.content.decode()) cids = cid_re.search(resp.content.decode())
if cids is not None and len(cids.groups()): if cids is not None and len(cids.groups()):
return cids.groups()[0] return cids.groups()[0]


@ -79,7 +79,6 @@ Startpage's category (for Web-search, News, Videos, ..) is set by
""" """
from typing import TYPE_CHECKING
from collections import OrderedDict from collections import OrderedDict
import re import re
from unicodedata import normalize, combining from unicodedata import normalize, combining
@ -96,14 +95,7 @@ from searx.exceptions import SearxEngineCaptchaException
from searx.locales import region_tag from searx.locales import region_tag
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": 'https://startpage.com', "website": 'https://startpage.com',
"wikidata_id": 'Q2333295', "wikidata_id": 'Q2333295',


@ -36,7 +36,7 @@ def response(resp):
results.append( results.append(
{ {
'template': 'images.html', 'template': 'images.html',
'url': base_url + extract_text(eval_xpath(result, url_xpath)), 'url': base_url + extract_text(eval_xpath(result, url_xpath)), # type: ignore
'title': extract_text(eval_xpath(result, title_xpath)).replace(" SVG File", "").replace("Show ", ""), 'title': extract_text(eval_xpath(result, title_xpath)).replace(" SVG File", "").replace("Show ", ""),
'img_src': extract_text(eval_xpath(result, img_src_xpath)), 'img_src': extract_text(eval_xpath(result, img_src_xpath)),
} }


@ -15,16 +15,11 @@ This SearXNG engine uses the `/api2u/search`_ API.
.. _OpenAPI: https://swagger.io/specification/ .. _OpenAPI: https://swagger.io/specification/
""" """
from typing import TYPE_CHECKING
from datetime import datetime from datetime import datetime
from urllib.parse import urlencode from urllib.parse import urlencode
import re import re
if TYPE_CHECKING:
import logging
logger: logging.Logger
about = { about = {
'website': "https://tagesschau.de", 'website': "https://tagesschau.de",


@ -166,7 +166,7 @@ def response(resp):
message = 'HTTP status: %s' % resp.status_code message = 'HTTP status: %s' % resp.status_code
error = json_data.get('error') error = json_data.get('error')
s_key = json_data.get('suggestions', {}).get('key', '') s_key = json_data.get('suggestions', {}).get('key', '') # type: ignore
if error and s_key: if error and s_key:
message = "%s (%s)" % (error, s_key) message = "%s (%s)" % (error, s_key)


@ -48,21 +48,16 @@ Implementations
""" """
from __future__ import annotations from __future__ import annotations
from typing import TYPE_CHECKING
from typing import List, Dict, Any from typing import List, Dict, Any
from datetime import datetime from datetime import datetime
from urllib.parse import quote from urllib.parse import quote
from lxml import etree # type: ignore from lxml import etree # type: ignore
import httpx
from searx.exceptions import SearxEngineAPIException from searx.exceptions import SearxEngineAPIException
if TYPE_CHECKING:
import httpx
import logging
logger: logging.Logger
# engine settings # engine settings
about: Dict[str, Any] = { about: Dict[str, Any] = {
"website": None, "website": None,


@ -5,7 +5,6 @@ from :ref:`wikipedia engine`.
""" """
# pylint: disable=missing-class-docstring # pylint: disable=missing-class-docstring
from typing import TYPE_CHECKING
from hashlib import md5 from hashlib import md5
from urllib.parse import urlencode, unquote from urllib.parse import urlencode, unquote
from json import loads from json import loads
@ -23,14 +22,6 @@ from searx.engines.wikipedia import (
) )
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
if TYPE_CHECKING:
import logging
logger: logging.Logger
traits: EngineTraits
# about
about = { about = {
"website": 'https://wikidata.org/', "website": 'https://wikidata.org/',
"wikidata_id": 'Q2013', "wikidata_id": 'Q2013',
@ -142,7 +133,7 @@ def get_headers():
return {'Accept': 'application/sparql-results+json', 'User-Agent': searx_useragent()} return {'Accept': 'application/sparql-results+json', 'User-Agent': searx_useragent()}
def get_label_for_entity(entity_id, language): def get_label_for_entity(entity_id, language): # type: ignore
name = WIKIDATA_PROPERTIES.get(entity_id) name = WIKIDATA_PROPERTIES.get(entity_id)
if name is None: if name is None:
name = WIKIDATA_PROPERTIES.get((entity_id, language)) name = WIKIDATA_PROPERTIES.get((entity_id, language))
@ -497,7 +488,7 @@ class WDAttribute:
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
def get_select(self): def get_select(self) -> str:
return '(group_concat(distinct ?{name};separator=", ") as ?{name}s)'.replace('{name}', self.name) return '(group_concat(distinct ?{name};separator=", ") as ?{name}s)'.replace('{name}', self.name)
def get_label(self, language): def get_label(self, language):
@ -506,10 +497,10 @@ class WDAttribute:
def get_where(self): def get_where(self):
return "OPTIONAL { ?item wdt:{name} ?{name} . }".replace('{name}', self.name) return "OPTIONAL { ?item wdt:{name} ?{name} . }".replace('{name}', self.name)
def get_wikibase_label(self): def get_wikibase_label(self) -> str:
return "" return ""
def get_group_by(self): def get_group_by(self) -> str:
return "" return ""
def get_str(self, result, language): # pylint: disable=unused-argument def get_str(self, result, language): # pylint: disable=unused-argument
@ -702,7 +693,7 @@ class WDDateAttribute(WDAttribute):
# precision: minute # precision: minute
return ( return (
get_datetime_format(format, locale=locale) get_datetime_format(format, locale=locale)
.replace("'", "") .replace("'", "") # type: ignore
.replace('{0}', format_time(timestamp, 'full', tzinfo=None, locale=locale)) .replace('{0}', format_time(timestamp, 'full', tzinfo=None, locale=locale))
.replace('{1}', format_date(timestamp, 'short', locale=locale)) .replace('{1}', format_date(timestamp, 'short', locale=locale))
) )


@ -64,8 +64,6 @@ from searx import network as _network
from searx import locales from searx import locales
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
traits: EngineTraits
# about # about
about = { about = {
"website": 'https://www.wikipedia.org/', "website": 'https://www.wikipedia.org/',
@ -277,7 +275,7 @@ def fetch_wikimedia_traits(engine_traits: EngineTraits):
engine_traits.regions[sxng_tag] = eng_tag engine_traits.regions[sxng_tag] = eng_tag
resp = _network.get(list_of_wikipedias) resp = _network.get(list_of_wikipedias)
if not resp.ok: if not resp.ok: # type: ignore
print("ERROR: response from Wikipedia is not OK.") print("ERROR: response from Wikipedia is not OK.")
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)


@ -5,7 +5,7 @@
from urllib.parse import urlencode from urllib.parse import urlencode
from lxml import etree from lxml import etree # type: ignore
# about # about
about = { about = {


@ -4,7 +4,7 @@
""" """
from urllib.parse import urlencode, urljoin from urllib.parse import urlencode, urljoin
from lxml import html, etree from lxml import html, etree # type: ignore
from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex


@ -73,7 +73,7 @@ from lxml import html
from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
from searx.network import raise_for_httperror from searx.network import raise_for_httperror
search_url = None search_url = ''
""" """
Search URL of the engine. Example:: Search URL of the engine. Example::
@ -270,7 +270,9 @@ def response(resp): # pylint: disable=too-many-branches
# add alternative cached url if available # add alternative cached url if available
if cached_xpath: if cached_xpath:
tmp_result['cached_url'] = cached_url + extract_text(eval_xpath_list(result, cached_xpath, min_len=1)) tmp_result['cached_url'] = cached_url + extract_text(
eval_xpath_list(result, cached_xpath, min_len=1)
) # type: ignore
if is_onion: if is_onion:
tmp_result['is_onion'] = True tmp_result['is_onion'] = True
@ -290,7 +292,7 @@ def response(resp): # pylint: disable=too-many-branches
'url': url, 'url': url,
'title': title, 'title': title,
'content': content, 'content': content,
'cached_url': cached_url + cached, 'cached_url': cached_url + cached, # type: ignore
'is_onion': is_onion, 'is_onion': is_onion,
} }
) )


@ -19,8 +19,6 @@ from searx.utils import (
) )
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
traits: EngineTraits
# about # about
about = { about = {
"website": 'https://search.yahoo.com/', "website": 'https://search.yahoo.com/',
@ -86,7 +84,7 @@ def request(query, params):
'p': query, 'p': query,
'ei': 'UTF-8', 'ei': 'UTF-8',
'fl': 1, 'fl': 1,
'vl': 'lang_' + lang, 'vl': 'lang_' + lang, # type: ignore
'btf': btf, 'btf': btf,
'fr2': 'time', 'fr2': 'time',
'age': age, 'age': age,
@ -95,7 +93,7 @@ def request(query, params):
} }
) )
domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang) domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang) # type: ignore
params['url'] = 'https://%s/search?%s' % (domain, args) params['url'] = 'https://%s/search?%s' % (domain, args)
return params return params
@ -158,7 +156,7 @@ def fetch_traits(engine_traits: EngineTraits):
engine_traits.all_locale = 'any' engine_traits.all_locale = 'any'
resp = network.get('https://search.yahoo.com/preferences/languages') resp = network.get('https://search.yahoo.com/preferences/languages')
if not resp.ok: if not resp.ok: # type: ignore
print("ERROR: response from yahoo is not OK.") print("ERROR: response from yahoo is not OK.")
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)


@ -82,7 +82,7 @@ def response(resp):
item = {'url': url, 'title': title, 'content': content, 'img_src': img_src} item = {'url': url, 'title': title, 'content': content, 'img_src': img_src}
pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]')) pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
ago = AGO_RE.search(pub_date) ago = AGO_RE.search(pub_date) # type: ignore
if ago: if ago:
number = int(ago.group(1)) number = int(ago.group(1))
delta = AGO_TIMEDELTA[ago.group(2)] delta = AGO_TIMEDELTA[ago.group(2)]


@ -32,11 +32,13 @@ Implementations
=============== ===============
""" """
from __future__ import annotations from __future__ import annotations
from typing import TYPE_CHECKING
from typing import List, Dict, Any, Optional from typing import List, Dict, Any, Optional
from datetime import datetime from datetime import datetime
from urllib.parse import quote from urllib.parse import quote
import httpx
from lxml import html from lxml import html
from flask_babel import gettext from flask_babel import gettext
@ -44,13 +46,7 @@ from searx.utils import extract_text, eval_xpath, eval_xpath_list
from searx.enginelib.traits import EngineTraits from searx.enginelib.traits import EngineTraits
from searx.data import ENGINE_TRAITS from searx.data import ENGINE_TRAITS
if TYPE_CHECKING:
import httpx
import logging
logger: logging.Logger
# about
about: Dict[str, Any] = { about: Dict[str, Any] = {
"website": "https://zlibrary-global.se", "website": "https://zlibrary-global.se",
"wikidata_id": "Q104863992", "wikidata_id": "Q104863992",


@ -61,7 +61,7 @@ class SearxEngineAccessDeniedException(SearxEngineResponseException):
"""This settings contains the default suspended time (default 86400 sec / 1 """This settings contains the default suspended time (default 86400 sec / 1
day).""" day)."""
def __init__(self, suspended_time: int = None, message: str = 'Access denied'): def __init__(self, suspended_time: int = 0, message: str = 'Access denied'):
"""Generic exception to raise when an engine denies access to the results. """Generic exception to raise when an engine denies access to the results.
:param suspended_time: How long the engine is going to be suspended in :param suspended_time: How long the engine is going to be suspended in
@ -75,10 +75,10 @@ class SearxEngineAccessDeniedException(SearxEngineResponseException):
self.suspended_time = suspended_time self.suspended_time = suspended_time
self.message = message self.message = message
def _get_default_suspended_time(self): def _get_default_suspended_time(self) -> int:
from searx import get_setting # pylint: disable=C0415 from searx import get_setting # pylint: disable=C0415
return get_setting(self.SUSPEND_TIME_SETTING) return get_setting(self.SUSPEND_TIME_SETTING) # type: ignore
class SearxEngineCaptchaException(SearxEngineAccessDeniedException): class SearxEngineCaptchaException(SearxEngineAccessDeniedException):
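
Changing the default from None to 0 removes the implicit Optional that pyright rejects for a parameter annotated as int. A minimal sketch of the sentinel-plus-fallback pattern, with a hard-coded constant standing in for get_setting(); the class below is illustrative, not the project's exception hierarchy:

class AccessDeniedSketch(Exception):
    """Illustrative only."""

    def __init__(self, suspended_time: int = 0, message: str = 'Access denied'):
        super().__init__(message)
        # 0 means "not given": fall back to the configured default
        self.suspended_time = suspended_time or self._get_default_suspended_time()
        self.message = message

    def _get_default_suspended_time(self) -> int:
        return 86400  # stand-in for get_setting(SUSPEND_TIME_SETTING)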


@ -56,7 +56,7 @@ def get_external_url(url_id, item_id, alternative="default"):
def get_earth_coordinates_url(latitude, longitude, osm_zoom, alternative='default'): def get_earth_coordinates_url(latitude, longitude, osm_zoom, alternative='default'):
url = ( url = (
get_external_url('map', None, alternative) get_external_url('map', None, alternative)
.replace('${latitude}', str(latitude)) .replace('${latitude}', str(latitude)) # type: ignore
.replace('${longitude}', str(longitude)) .replace('${longitude}', str(longitude))
.replace('${zoom}', str(osm_zoom)) .replace('${zoom}', str(osm_zoom))
) )


@ -85,7 +85,7 @@ Kong."""
def localeselector(): def localeselector():
locale = 'en' locale = 'en'
if has_request_context(): if has_request_context():
value = flask.request.preferences.get_value('locale') value = flask.request.preferences.get_value('locale') # type: ignore
if value: if value:
locale = value locale = value


@ -87,7 +87,7 @@ class ErrorContext: # pylint: disable=missing-class-docstring
def add_error_context(engine_name: str, error_context: ErrorContext) -> None: def add_error_context(engine_name: str, error_context: ErrorContext) -> None:
errors_for_engine = errors_per_engines.setdefault(engine_name, {}) errors_for_engine = errors_per_engines.setdefault(engine_name, {})
errors_for_engine[error_context] = errors_for_engine.get(error_context, 0) + 1 errors_for_engine[error_context] = errors_for_engine.get(error_context, 0) + 1
engines[engine_name].logger.warning('%s', str(error_context)) engines[engine_name].logger.warning('%s', str(error_context)) # type: ignore
def get_trace(traces): def get_trace(traces):
@ -102,9 +102,9 @@ def get_trace(traces):
def get_hostname(exc: HTTPError) -> typing.Optional[None]: def get_hostname(exc: HTTPError) -> typing.Optional[None]:
url = exc.request.url url = exc.request.url
if url is None and exc.response is not None: if url is None and exc.response is not None: # type: ignore
url = exc.response.url url = exc.response.url # type: ignore
return urlparse(url).netloc return urlparse(url).netloc # type: ignore
def get_request_exception_messages( def get_request_exception_messages(
@ -118,8 +118,8 @@ def get_request_exception_messages(
# exc.request is property that raise an RuntimeException # exc.request is property that raise an RuntimeException
# if exc._request is not defined. # if exc._request is not defined.
url = exc.request.url url = exc.request.url
if url is None and hasattr(exc, 'response') and exc.response is not None: if url is None and hasattr(exc, 'response') and exc.response is not None: # type: ignore
url = exc.response.url url = exc.response.url # type: ignore
if url is not None: if url is not None:
hostname = url.host hostname = url.host
if isinstance(exc, HTTPStatusError): if isinstance(exc, HTTPStatusError):


@ -70,7 +70,7 @@ class Histogram: # pylint: disable=missing-class-docstring
# use Decimal to avoid rounding errors # use Decimal to avoid rounding errors
x = decimal.Decimal(0) x = decimal.Decimal(0)
width = decimal.Decimal(self._width) width = decimal.Decimal(self._width)
width_exponent = -width.as_tuple().exponent width_exponent = -width.as_tuple().exponent # type: ignore
with self._lock: with self._lock:
if self._count > 0: if self._count > 0:
for y in self._quartiles: for y in self._quartiles:
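
Current typeshed stubs annotate Decimal.as_tuple().exponent as int | Literal['n', 'N', 'F'] (the letters cover NaN and infinity), so the unary minus trips pyright even though finite values always carry an int. A cast is the ignore-free alternative:

import decimal
from typing import cast

width = decimal.Decimal('0.125')
# the cast documents the "finite value, therefore int" assumption explicitly
width_exponent = -cast(int, width.as_tuple().exponent)
print(width_exponent)  # 3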


@ -166,9 +166,8 @@ class Network:
for transport in client._mounts.values(): # pylint: disable=protected-access for transport in client._mounts.values(): # pylint: disable=protected-access
if isinstance(transport, AsyncHTTPTransportNoHttp): if isinstance(transport, AsyncHTTPTransportNoHttp):
continue continue
if getattr(transport, "_pool") and getattr( # pylint: disable=protected-access
transport._pool, "_rdns", False # pylint: disable=protected-access if getattr(transport, "_pool") and getattr(transport._pool, "_rdns", False): # type: ignore
):
continue continue
return False return False
response = await client.get("https://check.torproject.org/api/ip", timeout=60) response = await client.get("https://check.torproject.org/api/ip", timeout=60)
@ -238,7 +237,7 @@ class Network:
if isinstance(response, httpx.Response): if isinstance(response, httpx.Response):
# requests compatibility (response is not streamed) # requests compatibility (response is not streamed)
# see also https://www.python-httpx.org/compatibility/#checking-for-4xx5xx-responses # see also https://www.python-httpx.org/compatibility/#checking-for-4xx5xx-responses
response.ok = not response.is_error response.ok = not response.is_error # type: ignore
# raise an exception # raise an exception
if do_raise_for_httperror: if do_raise_for_httperror:
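
The ok attribute stuck onto httpx.Response mirrors the requests API but stays invisible to the type checker. httpx already exposes the same information; a small helper is one ignore-free way to express it (a sketch, not the project's interface):

import httpx

def response_ok(response: httpx.Response) -> bool:
    # equivalent to the patched `response.ok`
    return not response.is_error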


@ -128,10 +128,9 @@ def load_plugin(plugin_module_name, external):
return None return None
# difference with searx: use module name instead of the user name # difference with searx: use module name instead of the user name
plugin.id = plugin_module_name plugin.id = plugin_module_name # type: ignore
# plugin.logger = getLogger(plugin_module_name) # type: ignore
plugin.logger = getLogger(plugin_module_name)
for plugin_attr, plugin_attr_type in required_attrs: for plugin_attr, plugin_attr_type in required_attrs:
if not hasattr(plugin, plugin_attr): if not hasattr(plugin, plugin_attr):
@ -152,7 +151,7 @@ def load_plugin(plugin_module_name, external):
setattr(plugin, plugin_attr, plugin_attr_type()) setattr(plugin, plugin_attr, plugin_attr_type())
if not hasattr(plugin, "preference_section"): if not hasattr(plugin, "preference_section"):
plugin.preference_section = "general" plugin.preference_section = "general" # type: ignore
# query plugin # query plugin
if plugin.preference_section == "query": if plugin.preference_section == "query":
@ -163,7 +162,9 @@ def load_plugin(plugin_module_name, external):
if settings.get("enabled_plugins"): if settings.get("enabled_plugins"):
# searx compatibility: plugin.name in settings['enabled_plugins'] # searx compatibility: plugin.name in settings['enabled_plugins']
plugin.default_on = plugin.name in settings["enabled_plugins"] or plugin.id in settings["enabled_plugins"] plugin.default_on = ( # type: ignore
plugin.name in settings["enabled_plugins"] or plugin.id in settings["enabled_plugins"]
)
# copy resources if this is an external plugin # copy resources if this is an external plugin
if external: if external:
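
The loader sticks id, logger, default_on and preference_section onto plain plugin modules, which is what the ignores paper over. A typing.Protocol describing those attributes is one conceivable typed interface; the attribute names are taken from this hunk, the protocol itself is an assumption:

import logging
from typing import Protocol

class PluginLike(Protocol):
    id: str
    name: str
    logger: logging.Logger
    default_on: bool
    preference_section: str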


@ -9,7 +9,7 @@ description = "Filter out onion results that appear in Ahmia's blacklist. (See h
default_on = True default_on = True
preference_section = 'onions' preference_section = 'onions'
ahmia_blacklist = None ahmia_blacklist = []
def on_result(_request, _search, result): def on_result(_request, _search, result):


@ -324,7 +324,7 @@ class ClientPref:
# hint: searx.webapp.get_client_settings should be moved into this class # hint: searx.webapp.get_client_settings should be moved into this class
locale: babel.Locale locale: Optional[babel.Locale]
"""Locale prefered by the client.""" """Locale prefered by the client."""
def __init__(self, locale: Optional[babel.Locale] = None): def __init__(self, locale: Optional[babel.Locale] = None):
@ -359,7 +359,7 @@ class ClientPref:
try: try:
qvalue = float(qvalue.split('=')[-1]) qvalue = float(qvalue.split('=')[-1])
locale = babel.Locale.parse(lang, sep='-') locale = babel.Locale.parse(lang, sep='-')
except (ValueError, babel.core.UnknownLocaleError): except (ValueError, babel.core.UnknownLocaleError): # type: ignore
continue continue
pairs.append((locale, qvalue)) pairs.append((locale, qvalue))
@ -548,7 +548,7 @@ class Preferences:
self.tokens.parse_form(user_setting) self.tokens.parse_form(user_setting)
else: else:
self.unknown_params[user_setting_name] = user_setting self.unknown_params[user_setting_name] = user_setting
self.key_value_settings['categories'].parse_form(enabled_categories) self.key_value_settings['categories'].parse_form(enabled_categories) # type: ignore
self.engines.parse_form(disabled_engines) self.engines.parse_form(disabled_engines)
self.plugins.parse_form(disabled_plugins) self.plugins.parse_form(disabled_plugins)
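
The ignore on the except clause exists because pyright cannot see babel.core behind a bare import babel. A standalone sketch of the same Accept-Language q-value parsing that imports the exception explicitly instead:

from typing import List, Tuple

import babel
from babel.core import UnknownLocaleError

def parse_accept_language(header: str) -> List[Tuple[babel.Locale, float]]:
    """Split e.g. 'de-DE,de;q=0.9,en;q=0.8' into (Locale, qvalue) pairs."""
    pairs: List[Tuple[babel.Locale, float]] = []
    for part in header.split(','):
        lang, _, q = part.strip().partition(';')
        qvalue = 1.0
        if q:
            try:
                qvalue = float(q.split('=')[-1])
            except ValueError:
                continue
        try:
            locale = babel.Locale.parse(lang, sep='-')
        except (ValueError, UnknownLocaleError):
            continue
        pairs.append((locale, qvalue))
    return pairs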


@ -1,6 +1,7 @@
# SPDX-License-Identifier: AGPL-3.0-or-later # SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name, missing-module-docstring, missing-class-docstring # pylint: disable=invalid-name, missing-module-docstring, missing-class-docstring
from typing import Any
from abc import abstractmethod, ABC from abc import abstractmethod, ABC
import re import re
@ -18,7 +19,7 @@ class QueryPartParser(ABC):
@staticmethod @staticmethod
@abstractmethod @abstractmethod
def check(raw_value): def check(raw_value) -> Any:
"""Check if raw_value can be parsed""" """Check if raw_value can be parsed"""
def __init__(self, raw_text_query, enable_autocomplete): def __init__(self, raw_text_query, enable_autocomplete):
@ -26,7 +27,7 @@ class QueryPartParser(ABC):
self.enable_autocomplete = enable_autocomplete self.enable_autocomplete = enable_autocomplete
@abstractmethod @abstractmethod
def __call__(self, raw_value): def __call__(self, raw_value) -> Any:
"""Try to parse raw_value: set the self.raw_text_query properties """Try to parse raw_value: set the self.raw_text_query properties
return True if raw_value has been parsed return True if raw_value has been parsed
@ -309,7 +310,7 @@ class RawTextQuery:
self.autocomplete_location = last_index_location self.autocomplete_location = last_index_location
def get_autocomplete_full_query(self, text): def get_autocomplete_full_query(self, text):
qlist, position = self.autocomplete_location qlist, position = self.autocomplete_location # type: ignore
qlist[position] = text qlist[position] = text
return self.getFullQuery() return self.getFullQuery()


@ -33,7 +33,7 @@ logger = logging.getLogger(__name__)
def client() -> redis.Redis: def client() -> redis.Redis:
return _CLIENT return _CLIENT # type: ignore
def initialize(): def initialize():
@ -43,7 +43,7 @@ def initialize():
return False return False
try: try:
# create a client, but no connection is done # create a client, but no connection is done
_CLIENT = redis.Redis.from_url(redis_url) _CLIENT = redis.Redis.from_url(redis_url) # type: ignore
# log the parameters as seen by the redis lib, without the password # log the parameters as seen by the redis lib, without the password
kwargs = _CLIENT.get_connection_kwargs().copy() kwargs = _CLIENT.get_connection_kwargs().copy()
@ -57,11 +57,11 @@ def initialize():
# no error: the redis connection is working # no error: the redis connection is working
logger.info("connected to Redis") logger.info("connected to Redis")
return True return True
except redis.exceptions.RedisError as e: except redis.exceptions.RedisError as e: # type: ignore
_CLIENT = None _CLIENT = None
_pw = pwd.getpwuid(os.getuid()) _pw = pwd.getpwuid(os.getuid())
logger.exception("[%s (%s)] can't connect redis DB ...", _pw.pw_name, _pw.pw_uid) logger.exception("[%s (%s)] can't connect redis DB ...", _pw.pw_name, _pw.pw_uid)
if redis_url == OLD_REDIS_URL_DEFAULT_URL and isinstance(e, redis.exceptions.ConnectionError): if redis_url == OLD_REDIS_URL_DEFAULT_URL and isinstance(e, redis.exceptions.ConnectionError): # type: ignore
logger.info( logger.info(
"You can safely ignore the above Redis error if you don't use Redis. " "You can safely ignore the above Redis error if you don't use Redis. "
"You can remove this error by setting redis.url to false in your settings.yml." "You can remove this error by setting redis.url to false in your settings.yml."


@ -83,7 +83,7 @@ def secret_hash(name: str):
:type name: str :type name: str
""" """
m = hmac.new(bytes(name, encoding='utf-8'), digestmod='sha256') m = hmac.new(bytes(name, encoding='utf-8'), digestmod='sha256')
m.update(bytes(get_setting('server.secret_key'), encoding='utf-8')) m.update(bytes(get_setting('server.secret_key'), encoding='utf-8')) # type: ignore
return m.hexdigest() return m.hexdigest()


@ -1,11 +1,13 @@
# SPDX-License-Identifier: AGPL-3.0-or-later # SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=missing-module-docstring # pylint: disable=missing-module-docstring
from __future__ import annotations
from typing import List, NamedTuple, Set, Callable, Any
import re import re
from collections import defaultdict from collections import defaultdict
from operator import itemgetter from operator import itemgetter
from threading import RLock from threading import RLock
from typing import List, NamedTuple, Set
from urllib.parse import urlparse, unquote from urllib.parse import urlparse, unquote
from searx import logger from searx import logger
@ -61,11 +63,11 @@ def compare_urls(url_a, url_b):
def merge_two_infoboxes(infobox1, infobox2): # pylint: disable=too-many-branches, too-many-statements def merge_two_infoboxes(infobox1, infobox2): # pylint: disable=too-many-branches, too-many-statements
# get engines weights # get engines weights
if hasattr(engines[infobox1['engine']], 'weight'): if hasattr(engines[infobox1['engine']], 'weight'):
weight1 = engines[infobox1['engine']].weight weight1 = engines[infobox1['engine']].weight # type: ignore
else: else:
weight1 = 1 weight1 = 1
if hasattr(engines[infobox2['engine']], 'weight'): if hasattr(engines[infobox2['engine']], 'weight'):
weight2 = engines[infobox2['engine']].weight weight2 = engines[infobox2['engine']].weight # type: ignore
else: else:
weight2 = 1 weight2 = 1
@ -135,7 +137,7 @@ def result_score(result):
for result_engine in result['engines']: for result_engine in result['engines']:
if hasattr(engines[result_engine], 'weight'): if hasattr(engines[result_engine], 'weight'):
weight *= float(engines[result_engine].weight) weight *= float(engines[result_engine].weight) # type: ignore
occurrences = len(result['positions']) occurrences = len(result['positions'])
@ -187,8 +189,8 @@ class ResultContainer:
self.paging = False self.paging = False
self.unresponsive_engines: Set[UnresponsiveEngine] = set() self.unresponsive_engines: Set[UnresponsiveEngine] = set()
self.timings: List[Timing] = [] self.timings: List[Timing] = []
self.redirect_url = None self.redirect_url: str | None = None
self.on_result = lambda _: True self.on_result: Callable[[dict], Any] = lambda _: True
self._lock = RLock() self._lock = RLock()
def extend(self, engine_name, results): # pylint: disable=too-many-branches def extend(self, engine_name, results): # pylint: disable=too-many-branches
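
weight is an optional attribute of engine modules, hence the ignores around the hasattr checks. getattr with a default collapses the check-then-access pair into a single step the checker can follow; a small sketch:

def engine_weight(engine: object) -> float:
    """Weight declared by an engine module, defaulting to 1."""
    return float(getattr(engine, 'weight', 1))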


@ -50,8 +50,8 @@ class Search:
super().__init__() super().__init__()
self.search_query = search_query self.search_query = search_query
self.result_container = ResultContainer() self.result_container = ResultContainer()
self.start_time = None self.start_time: float = 0
self.actual_timeout = None self.actual_timeout: float = 0
def search_external_bang(self): def search_external_bang(self):
""" """
@ -146,8 +146,8 @@ class Search:
args=(query, request_params, self.result_container, self.start_time, self.actual_timeout), args=(query, request_params, self.result_container, self.start_time, self.actual_timeout),
name=search_id, name=search_id,
) )
th._timeout = False th._timeout = False # type: ignore
th._engine_name = engine_name th._engine_name = engine_name # type: ignore
th.start() th.start()
for th in threading.enumerate(): # pylint: disable=invalid-name for th in threading.enumerate(): # pylint: disable=invalid-name
@ -155,9 +155,9 @@ class Search:
remaining_time = max(0.0, self.actual_timeout - (default_timer() - self.start_time)) remaining_time = max(0.0, self.actual_timeout - (default_timer() - self.start_time))
th.join(remaining_time) th.join(remaining_time)
if th.is_alive(): if th.is_alive():
th._timeout = True th._timeout = True # type: ignore
self.result_container.add_unresponsive_engine(th._engine_name, 'timeout') self.result_container.add_unresponsive_engine(th._engine_name, 'timeout') # type: ignore
PROCESSORS[th._engine_name].logger.error('engine timeout') PROCESSORS[th._engine_name].logger.error('engine timeout') # type: ignore
def search_standard(self): def search_standard(self):
""" """
@ -197,7 +197,7 @@ class SearchWithPlugins(Search):
# * https://github.com/pallets/werkzeug/blob/3c5d3c9bd0d9ce64590f0af8997a38f3823b368d/src/werkzeug/local.py#L548-L559 # * https://github.com/pallets/werkzeug/blob/3c5d3c9bd0d9ce64590f0af8997a38f3823b368d/src/werkzeug/local.py#L548-L559
# * https://werkzeug.palletsprojects.com/en/2.0.x/local/#werkzeug.local.LocalProxy._get_current_object # * https://werkzeug.palletsprojects.com/en/2.0.x/local/#werkzeug.local.LocalProxy._get_current_object
# pylint: enable=line-too-long # pylint: enable=line-too-long
self.request = request._get_current_object() self.request = request._get_current_object() # type: ignore[attr-defined]
def _on_result(self, result): def _on_result(self, result):
return plugins.call(self.ordered_plugin_list, 'on_result', self.request, self, result) return plugins.call(self.ordered_plugin_list, 'on_result', self.request, self, result)
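
_timeout and _engine_name are attributes stuck onto plain threading.Thread objects, which is what the ignores above mark. A sketch of a small subclass that declares them up front (hypothetical, not part of this commit):

import threading

class EngineThread(threading.Thread):
    """Thread that carries its engine name and a timeout flag as declared attributes."""

    def __init__(self, engine_name: str, **kwargs):
        super().__init__(**kwargs)
        self.engine_name = engine_name
        self.timed_out = False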


@ -81,7 +81,7 @@ def get_result() -> CheckerResult:
if client is None: if client is None:
# without Redis, the checker is disabled # without Redis, the checker is disabled
return {'status': 'disabled'} return {'status': 'disabled'}
serialized_result: Optional[bytes] = client.get(REDIS_RESULT_KEY) serialized_result: Optional[bytes] = client.get(REDIS_RESULT_KEY) # type: ignore
if serialized_result is None: if serialized_result is None:
# the Redis key does not exist # the Redis key does not exist
return {'status': 'unknown'} return {'status': 'unknown'}


@ -263,7 +263,7 @@ class ResultContainerTests: # pylint: disable=missing-class-docstring
def check_basic(self): def check_basic(self):
if len(self.result_container.unresponsive_engines) > 0: if len(self.result_container.unresponsive_engines) > 0:
for message in self.result_container.unresponsive_engines: for message in self.result_container.unresponsive_engines:
self._record_error(message[1] + ' ' + (message[2] or '')) self._record_error(message.error_type + ' ' + (str(message.suspended) if message.suspended else ''))
self.stop_test = True self.stop_test = True
return return
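
The rewrite replaces positional indexing with named field access. Judging from this hunk and the unresponsive_engines: Set[UnresponsiveEngine] annotation seen earlier, the entries are presumably a NamedTuple along these lines (field names inferred, not verified against the full source):

from typing import NamedTuple

class UnresponsiveEngine(NamedTuple):
    engine: str
    error_type: str
    suspended: bool = False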


@ -11,10 +11,10 @@ This scheduler is not generic on purpose: if more feature are required, a dedica
(= a better scheduler should not use the web workers) (= a better scheduler should not use the web workers)
""" """
from typing import Callable
import logging import logging
import time import time
from pathlib import Path from pathlib import Path
from typing import Callable
from searx.redisdb import client as get_redis_client from searx.redisdb import client as get_redis_client
from searx.redislib import lua_script_storage from searx.redislib import lua_script_storage


@ -2,7 +2,7 @@
# pylint: disable=missing-module-docstring # pylint: disable=missing-module-docstring
import typing import typing
import babel import babel.core
class EngineRef: class EngineRef:


@ -77,6 +77,6 @@ def initialize(engine_list):
processor = get_processor(engine, engine_name) processor = get_processor(engine, engine_name)
initialize_processor(processor) initialize_processor(processor)
if processor is None: if processor is None:
engine.logger.error('Error get processor for engine %s', engine_name) engine.logger.error('Error get processor for engine %s', engine_name) # type: ignore
else: else:
PROCESSORS[engine_name] = processor PROCESSORS[engine_name] = processor


@ -63,7 +63,7 @@ class EngineProcessor(ABC):
def __init__(self, engine, engine_name: str): def __init__(self, engine, engine_name: str):
self.engine = engine self.engine = engine
self.engine_name = engine_name self.engine_name = engine_name
self.logger = engines[engine_name].logger self.logger = engines[engine_name].logger # type: ignore
key = get_network(self.engine_name) key = get_network(self.engine_name)
key = id(key) if key else self.engine_name key = id(key) if key else self.engine_name
self.suspended_status = SUSPENDED_STATUS.setdefault(key, SuspendedStatus()) self.suspended_status = SUSPENDED_STATUS.setdefault(key, SuspendedStatus())


@ -147,7 +147,7 @@ class OnlineProcessor(EngineProcessor):
response = self._send_http_request(params) response = self._send_http_request(params)
# parse the response # parse the response
response.search_params = params response.search_params = params # type: ignore
return self.engine.response(response) return self.engine.response(response)
def search(self, query, params, result_container, start_time, timeout_limit): def search(self, query, params, result_container, start_time, timeout_limit):


@ -3,6 +3,7 @@
""" """
from __future__ import annotations
import typing import typing
import numbers import numbers
import errno import errno
@ -49,7 +50,7 @@ class SettingsValue:
self, self,
type_definition: typing.Union[None, typing.Any, typing.Tuple[typing.Any]] = None, type_definition: typing.Union[None, typing.Any, typing.Tuple[typing.Any]] = None,
default: typing.Any = None, default: typing.Any = None,
environ_name: str = None, environ_name: str | None = None,
): ):
self.type_definition = ( self.type_definition = (
type_definition if type_definition is None or isinstance(type_definition, tuple) else (type_definition,) type_definition if type_definition is None or isinstance(type_definition, tuple) else (type_definition,)
@ -59,13 +60,13 @@ class SettingsValue:
@property @property
def type_definition_repr(self): def type_definition_repr(self):
types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition] types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition] # type: ignore
return ', '.join(types_str) return ', '.join(types_str)
def check_type_definition(self, value: typing.Any) -> None: def check_type_definition(self, value: typing.Any) -> None:
if value in self.type_definition: if value in self.type_definition:
return return
type_list = tuple(t for t in self.type_definition if isinstance(t, type)) type_list = tuple(t for t in self.type_definition if isinstance(t, type)) # type: ignore
if not isinstance(value, type_list): if not isinstance(value, type_list):
raise ValueError('The value has to be one of these types/values: {}'.format(self.type_definition_repr)) raise ValueError('The value has to be one of these types/values: {}'.format(self.type_definition_repr))
@ -89,7 +90,7 @@ class SettingSublistValue(SettingsValue):
if not isinstance(value, list): if not isinstance(value, list):
raise ValueError('The value has to be a list') raise ValueError('The value has to be a list')
for item in value: for item in value:
if not item in self.type_definition[0]: if not item in self.type_definition[0]: # type: ignore
raise ValueError('{} not in {}'.format(item, self.type_definition)) raise ValueError('{} not in {}'.format(item, self.type_definition))
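
The environ_name: str | None annotation relies on the from __future__ import annotations line added at the top of this module: with postponed evaluation (PEP 563) the union syntax stays an unevaluated string, so it is accepted even on Python 3.8 where str | None is not a valid runtime expression. A minimal illustration:

from __future__ import annotations

def lookup(environ_name: str | None = None) -> str | None:
    # the annotation is never evaluated at runtime, but pyright checks it
    return environ_name

print(lookup('EXAMPLE'))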


@ -52,7 +52,7 @@ _STORAGE_UNIT_VALUE: Dict[str, int] = {
} }
_XPATH_CACHE: Dict[str, XPath] = {} _XPATH_CACHE: Dict[str, XPath] = {}
_LANG_TO_LC_CACHE: Dict[str, Dict[str, str]] = {} _LANG_TO_LC_CACHE: Dict[str, Dict[str, str]] = {} # type: ignore
_FASTTEXT_MODEL: Optional["fasttext.FastText._FastText"] = None # type: ignore _FASTTEXT_MODEL: Optional["fasttext.FastText._FastText"] = None # type: ignore
"""fasttext model to predict laguage of a search term""" """fasttext model to predict laguage of a search term"""
@ -214,7 +214,7 @@ def extract_text(xpath_results, allow_none: bool = False) -> Optional[str]:
return result.strip() return result.strip()
if isinstance(xpath_results, ElementBase): if isinstance(xpath_results, ElementBase):
# it's a element # it's a element
text: str = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False) text: str = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False) # type: ignore
text = text.strip().replace('\n', ' ') text = text.strip().replace('\n', ' ')
return ' '.join(text.split()) return ' '.join(text.split())
if isinstance(xpath_results, (str, Number, bool)): if isinstance(xpath_results, (str, Number, bool)):
@ -564,7 +564,7 @@ def eval_xpath_list(element: ElementBase, xpath_spec: XPathSpecType, min_len: Op
return result return result
def eval_xpath_getindex(elements: ElementBase, xpath_spec: XPathSpecType, index: int, default=_NOTSET): def eval_xpath_getindex(elements: ElementBase, xpath_spec: XPathSpecType, index: int, default=_NOTSET) -> ElementBase:
"""Call eval_xpath_list then get one element using the index parameter. """Call eval_xpath_list then get one element using the index parameter.
If the index does not exist, either raise an exception if default is not set, If the index does not exist, either raise an exception if default is not set,
otherwise return the default value (can be None). otherwise return the default value (can be None).
@ -599,7 +599,7 @@ def _get_fasttext_model() -> "fasttext.FastText._FastText": # type: ignore
import fasttext # pylint: disable=import-outside-toplevel import fasttext # pylint: disable=import-outside-toplevel
# Monkey patch: prevent fasttext from showing a (useless) warning when loading a model. # Monkey patch: prevent fasttext from showing a (useless) warning when loading a model.
fasttext.FastText.eprint = lambda x: None fasttext.FastText.eprint = lambda x: None # type: ignore
_FASTTEXT_MODEL = fasttext.load_model(str(data_dir / 'lid.176.ftz')) _FASTTEXT_MODEL = fasttext.load_model(str(data_dir / 'lid.176.ftz'))
return _FASTTEXT_MODEL return _FASTTEXT_MODEL


@ -55,7 +55,7 @@ def parse_pageno(form: Dict[str, str]) -> int:
def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: RawTextQuery) -> str: def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: RawTextQuery) -> str:
if is_locked('language'): if is_locked('language'):
return preferences.get_value('language') return preferences.get_value('language') # type: ignore
# get language # get language
# set specific language if set on request, query or preferences # set specific language if set on request, query or preferences
# search with multiple languages is not supported (by most engines) # search with multiple languages is not supported (by most engines)
@ -67,15 +67,15 @@ def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: R
query_lang = preferences.get_value('language') query_lang = preferences.get_value('language')
# check language # check language
if not VALID_LANGUAGE_CODE.match(query_lang) and query_lang != 'auto': if not VALID_LANGUAGE_CODE.match(query_lang) and query_lang != 'auto': # type: ignore
raise SearxParameterException('language', query_lang) raise SearxParameterException('language', query_lang)
return query_lang return query_lang # type: ignore
def parse_safesearch(preferences: Preferences, form: Dict[str, str]) -> int: def parse_safesearch(preferences: Preferences, form: Dict[str, str]) -> int:
if is_locked('safesearch'): if is_locked('safesearch'):
return preferences.get_value('safesearch') return preferences.get_value('safesearch') # type: ignore
if 'safesearch' in form: if 'safesearch' in form:
query_safesearch = form.get('safesearch') query_safesearch = form.get('safesearch')
@ -87,10 +87,10 @@ def parse_safesearch(preferences: Preferences, form: Dict[str, str]) -> int:
query_safesearch = preferences.get_value('safesearch') query_safesearch = preferences.get_value('safesearch')
# safesearch : second check # safesearch : second check
if query_safesearch < 0 or query_safesearch > 2: if query_safesearch < 0 or query_safesearch > 2: # type: ignore
raise SearxParameterException('safesearch', query_safesearch) raise SearxParameterException('safesearch', query_safesearch)
return query_safesearch return query_safesearch # type: ignore
def parse_time_range(form: Dict[str, str]) -> Optional[str]: def parse_time_range(form: Dict[str, str]) -> Optional[str]:
@ -145,7 +145,7 @@ def get_selected_categories(preferences: Preferences, form: Optional[Dict[str, s
# (is stored in cookie) # (is stored in cookie)
if not selected_categories: if not selected_categories:
cookie_categories = preferences.get_value('categories') cookie_categories = preferences.get_value('categories')
for ccateg in cookie_categories: for ccateg in cookie_categories: # type: ignore
selected_categories.append(ccateg) selected_categories.append(ccateg)
# if still no category is specified, using general # if still no category is specified, using general


@ -171,7 +171,7 @@ class ExtendedRequest(flask.Request):
preferences: Preferences preferences: Preferences
errors: List[str] errors: List[str]
user_plugins: List[Plugin] user_plugins: List[Plugin]
form: Dict[str, str] form: Dict[str, str] # type: ignore
start_time: float start_time: float
render_time: float render_time: float
timings: List[Timing] timings: List[Timing]
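
ExtendedRequest declares, as ordinary annotations, attributes that the application attaches to flask.request at runtime; this is the declared-interface counterpart to the ignores used elsewhere. A hypothetical usage sketch (the cast and names are illustrative, not necessarily how webapp.py consumes the class):

from typing import List, cast

import flask

class TypedRequestSketch(flask.Request):
    # attributes attached at runtime by the application
    errors: List[str]
    start_time: float

def request_errors() -> List[str]:
    req = cast(TypedRequestSketch, flask.request)  # typed view of the current request
    return req.errors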


@ -56,7 +56,7 @@ def get_search_query(
) -> searx.search.SearchQuery: ) -> searx.search.SearchQuery:
"""Get search results for the query""" """Get search results for the query"""
if engine_categories is None: if engine_categories is None:
engine_categories = list(searx.engines.categories.keys()) engine_categories = list(searx.engines.categories.keys()) # type: ignore
try: try:
category = args.category.decode('utf-8') category = args.category.decode('utf-8')
except AttributeError: except AttributeError:
@ -68,7 +68,7 @@ def get_search_query(
"language": args.lang, "language": args.lang,
"time_range": args.timerange, "time_range": args.timerange,
} }
preferences = searx.preferences.Preferences(['simple'], engine_categories, searx.engines.engines, []) preferences = searx.preferences.Preferences(['simple'], engine_categories, searx.engines.engines, []) # type: ignore
preferences.key_value_settings['safesearch'].parse(args.safesearch) preferences.key_value_settings['safesearch'].parse(args.safesearch)
search_query = searx.webadapter.get_search_query_from_webapp(preferences, form)[0] search_query = searx.webadapter.get_search_query_from_webapp(preferences, form)[0]
@ -141,7 +141,7 @@ def parse_argument(
Namespace(category='general', lang='all', pageno=1, query='rain', safesearch='0', timerange=None) Namespace(category='general', lang='all', pageno=1, query='rain', safesearch='0', timerange=None)
""" # noqa: E501 """ # noqa: E501
if not category_choices: if not category_choices:
category_choices = list(searx.engines.categories.keys()) category_choices = list(searx.engines.categories.keys()) # type: ignore
parser = argparse.ArgumentParser(description='Standalone searx.') parser = argparse.ArgumentParser(description='Standalone searx.')
parser.add_argument('query', type=str, help='Text query') parser.add_argument('query', type=str, help='Text query')
parser.add_argument( parser.add_argument(
@ -166,7 +166,7 @@ def parse_argument(
if __name__ == '__main__': if __name__ == '__main__':
settings_engines = searx.settings['engines'] settings_engines = searx.settings['engines']
searx.search.load_engines(settings_engines) searx.search.load_engines(settings_engines)
engine_cs = list(searx.engines.categories.keys()) engine_cs = list(searx.engines.categories.keys()) # type: ignore
prog_args = parse_argument(category_choices=engine_cs) prog_args = parse_argument(category_choices=engine_cs)
searx.search.initialize_network(settings_engines, searx.settings['outgoing']) searx.search.initialize_network(settings_engines, searx.settings['outgoing'])
searx.search.check_network_configuration() searx.search.check_network_configuration()


@ -178,7 +178,7 @@ def get_website_description(url, lang1, lang2=None):
def initialize(): def initialize():
global IDS, LANGUAGES_SPARQL global IDS, LANGUAGES_SPARQL
searx.search.initialize() searx.search.initialize()
wikipedia_engine = searx.engines.engines['wikipedia'] wikipedia_engine = searx.engines.engines['wikipedia'] # type: ignore
locale2lang = {'nl-BE': 'nl'} locale2lang = {'nl-BE': 'nl'}
for sxng_ui_lang in LOCALE_NAMES: for sxng_ui_lang in LOCALE_NAMES:
@ -196,7 +196,7 @@ def initialize():
WIKIPEDIA_LANGUAGES[sxng_ui_lang] = wiki_lang WIKIPEDIA_LANGUAGES[sxng_ui_lang] = wiki_lang
LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values())) LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))
for engine_name, engine in searx.engines.engines.items(): for engine_name, engine in searx.engines.engines.items(): # type: ignore
descriptions[engine_name] = {} descriptions[engine_name] = {}
wikidata_id = getattr(engine, "about", {}).get('wikidata_id') wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
if wikidata_id is not None: if wikidata_id is not None:
@ -209,7 +209,7 @@ def fetch_wikidata_descriptions():
print('Fetching wikidata descriptions') print('Fetching wikidata descriptions')
searx.network.set_timeout_for_thread(60) searx.network.set_timeout_for_thread(60)
result = wikidata.send_wikidata_query( result = wikidata.send_wikidata_query(
SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL) SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL) # type: ignore
) )
if result is not None: if result is not None:
for binding in result['results']['bindings']: for binding in result['results']['bindings']:
@ -230,7 +230,7 @@ def fetch_wikidata_descriptions():
def fetch_wikipedia_descriptions(): def fetch_wikipedia_descriptions():
print('Fetching wikipedia descriptions') print('Fetching wikipedia descriptions')
result = wikidata.send_wikidata_query( result = wikidata.send_wikidata_query(
SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL) SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL) # type: ignore
) )
if result is not None: if result is not None:
for binding in result['results']['bindings']: for binding in result['results']['bindings']:
@ -313,7 +313,7 @@ def fetch_website_description(engine_name, website):
def fetch_website_descriptions(): def fetch_website_descriptions():
print('Fetching website descriptions') print('Fetching website descriptions')
for engine_name, engine in searx.engines.engines.items(): for engine_name, engine in searx.engines.engines.items(): # type: ignore
website = getattr(engine, "about", {}).get('website') website = getattr(engine, "about", {}).get('website')
if website is None and hasattr(engine, "search_url"): if website is None and hasattr(engine, "search_url"):
website = normalize_url(getattr(engine, "search_url")) website = normalize_url(getattr(engine, "search_url"))


@ -44,7 +44,7 @@ class SearxRobotLayer:
[exe, webapp], stdout=subprocess.PIPE, stderr=subprocess.STDOUT [exe, webapp], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
) )
if hasattr(self.server.stdout, 'read1'): if hasattr(self.server.stdout, 'read1'):
print(self.server.stdout.read1(1024).decode()) print(self.server.stdout.read1(1024).decode()) # type: ignore
def tearDown(self): def tearDown(self):
os.kill(self.server.pid, 9) os.kill(self.server.pid, 9)
@ -55,7 +55,11 @@ class SearxRobotLayer:
def run_robot_tests(tests): def run_robot_tests(tests):
print('Running {0} tests'.format(len(tests))) print('Running {0} tests'.format(len(tests)))
for test in tests: for test in tests:
with Browser('firefox', headless=True, profile_preferences={'intl.accept_languages': 'en'}) as browser: with Browser(
'firefox',
headless=True,
profile_preferences={'intl.accept_languages': 'en'},
) as browser: # type: ignore
test(browser) test(browser)


@ -168,8 +168,8 @@ commit '''
git_log_engine.result_separator = '\n\ncommit ' git_log_engine.result_separator = '\n\ncommit '
git_log_engine.delimiter = {} git_log_engine.delimiter = {}
git_log_engine.parse_regex = { git_log_engine.parse_regex = {
'commit': '\w{40}', 'commit': r'\w{40}',
'author': '[\w* ]* <\w*@?\w*\.?\w*>', 'author': r'[\w* ]* <\w*@?\w*\.?\w*>',
'date': 'Date: .*', 'date': 'Date: .*',
'message': '\n\n.*$', 'message': '\n\n.*$',
} }
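
The raw-string prefix changes only how the literal is written, not the compiled pattern; without it, sequences such as '\w' are invalid escape sequences that newer Python versions flag with a warning. Quick check:

import re

# identical patterns, with and without the raw-string prefix
assert re.compile(r'\w{40}').pattern == re.compile('\\w{40}').pattern
assert re.fullmatch(r'\w{40}', 'a' * 40)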


@ -10,13 +10,13 @@ from tests import SearxTestCase
class TestXpathEngine(SearxTestCase): class TestXpathEngine(SearxTestCase):
def test_request(self): def test_request(self):
xpath.search_url = 'https://url.com/{query}' xpath.search_url = 'https://url.com/{query}'
xpath.categories = [] xpath.categories = [] # type: ignore
xpath.paging = False xpath.paging = False
query = 'test_query' query = 'test_query'
dicto = defaultdict(dict) dicto = defaultdict(dict)
params = xpath.request(query, dicto) params = xpath.request(query, dicto)
self.assertIn('url', params) self.assertIn('url', params)
self.assertEquals('https://url.com/test_query', params['url']) self.assertEquals('https://url.com/test_query', params['url']) # type: ignore
xpath.search_url = 'https://url.com/q={query}&p={pageno}' xpath.search_url = 'https://url.com/q={query}&p={pageno}'
xpath.paging = True xpath.paging = True
@ -25,7 +25,7 @@ class TestXpathEngine(SearxTestCase):
dicto['pageno'] = 1 dicto['pageno'] = 1
params = xpath.request(query, dicto) params = xpath.request(query, dicto)
self.assertIn('url', params) self.assertIn('url', params)
self.assertEquals('https://url.com/q=test_query&p=1', params['url']) self.assertEquals('https://url.com/q=test_query&p=1', params['url']) # type: ignore
def test_response(self): def test_response(self):
# without results_xpath # without results_xpath
@ -41,7 +41,7 @@ class TestXpathEngine(SearxTestCase):
response = mock.Mock(text='<html></html>') response = mock.Mock(text='<html></html>')
self.assertEqual(xpath.response(response), []) self.assertEqual(xpath.response(response), [])
html = u""" html = """
<div> <div>
<div class="search_result"> <div class="search_result">
<a class="result" href="https://result1.com">Result 1</a> <a class="result" href="https://result1.com">Result 1</a>
@ -76,7 +76,7 @@ class TestXpathEngine(SearxTestCase):
self.assertFalse(results[0].get('is_onion', False)) self.assertFalse(results[0].get('is_onion', False))
# results are onion urls (no results_xpath) # results are onion urls (no results_xpath)
xpath.categories = ['onions'] xpath.categories = ['onions'] # type: ignore
results = xpath.response(response) results = xpath.response(response)
self.assertTrue(results[0]['is_onion']) self.assertTrue(results[0]['is_onion'])
@ -86,7 +86,7 @@ class TestXpathEngine(SearxTestCase):
xpath.title_xpath = './/a[@class="result"]' xpath.title_xpath = './/a[@class="result"]'
xpath.content_xpath = './/p[@class="content"]' xpath.content_xpath = './/p[@class="content"]'
xpath.cached_xpath = None xpath.cached_xpath = None
xpath.categories = [] xpath.categories = [] # type: ignore
self.assertRaises(AttributeError, xpath.response, None) self.assertRaises(AttributeError, xpath.response, None)
self.assertRaises(AttributeError, xpath.response, []) self.assertRaises(AttributeError, xpath.response, [])
@ -117,6 +117,6 @@ class TestXpathEngine(SearxTestCase):
self.assertFalse(results[0].get('is_onion', False)) self.assertFalse(results[0].get('is_onion', False))
# results are onion urls (with results_xpath) # results are onion urls (with results_xpath)
xpath.categories = ['onions'] xpath.categories = ['onions'] # type: ignore
results = xpath.response(response) results = xpath.response(response)
self.assertTrue(results[0]['is_onion']) self.assertTrue(results[0]['is_onion'])


@ -54,7 +54,7 @@ class TestEnginesInit(SearxTestCase): # pylint: disable=missing-class-docstring
self.assertIn('engine1', engines.engines) self.assertIn('engine1', engines.engines)
self.assertIn('engine2', engines.engines) self.assertIn('engine2', engines.engines)
self.assertIn('onions', engines.categories) self.assertIn('onions', engines.categories)
self.assertIn('http://engine1.onion', engines.engines['engine1'].search_url) self.assertIn('http://engine1.onion', engines.engines['engine1'].search_url) # type: ignore
self.assertEqual(engines.engines['engine1'].timeout, 120.0) self.assertEqual(engines.engines['engine1'].timeout, 120.0)
def test_missing_name_field(self): def test_missing_name_field(self):


@ -42,10 +42,12 @@ class PluginStoreTest(SearxTestCase): # pylint: disable=missing-class-docstring
request = Mock() request = Mock()
store.call([], 'asdf', request, Mock()) store.call([], 'asdf', request, Mock())
self.assertFalse(testplugin.asdf.called) # pylint: disable=E1101 # pylint: disable=no-member
self.assertFalse(testplugin.asdf.called) # type: ignore
store.call([testplugin], 'asdf', request, Mock()) store.call([testplugin], 'asdf', request, Mock())
self.assertTrue(testplugin.asdf.called) # pylint: disable=E1101 self.assertTrue(testplugin.asdf.called) # type: ignore
# pylint: enable=no-member
class SelfIPTest(SearxTestCase): # pylint: disable=missing-class-docstring class SelfIPTest(SearxTestCase): # pylint: disable=missing-class-docstring


@ -44,18 +44,13 @@ test.pylint() {
} }
test.pyright() { test.pyright() {
build_msg TEST "[pyright] static type check of python sources"
node.env.dev
# We run Pyright in the virtual environment because Pyright # We run Pyright in the virtual environment because Pyright
# executes "python" to determine the Python version. # executes "python" to determine the Python version.
build_msg TEST "[pyright] suppress warnings related to intentional monkey patching" ( set -e
pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json \ build_msg TEST "[pyright] static type check of python sources"
| grep -v ".py$" \ node.env.dev
| grep -v '/engines/.*.py.* - warning: "logger" is not defined'\ pyenv.cmd npx --no-install pyright -p pyrightconfig.json
| grep -v '/plugins/.*.py.* - error: "logger" is not defined'\ )
| grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
| grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
| grep -v '/engines/.*.py.* - warning: "categories" is not defined'
dump_return $? dump_return $?
} }
@ -108,4 +103,3 @@ test.clean() {
rm -rf geckodriver.log .coverage coverage/ rm -rf geckodriver.log .coverage coverage/
dump_return $? dump_return $?
} }