diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 358548247..b2750d0c2 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -163,7 +163,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
+            if res.get('content') > duplicated.get('content'):
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index e7cc808bb..708b999f8 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -39,7 +39,7 @@ def parse(query):
 
 def do_query(data, q):
     ret = []
-    if not len(q):
+    if not q:
         return ret
 
     qkey = q[0]
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index fcfc21160..cbafe0976 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -35,7 +35,7 @@ def response(resp):
         title = link.text_content()
 
         content = ''
-        if len(result.xpath('./p[@class="desc"]')):
+        if result.xpath('./p[@class="desc"]'):
             content = result.xpath('./p[@class="desc"]')[0].text_content()
 
         results.append({'url': url, 'title': title, 'content': content})
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 04b021e33..9af24de3b 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -23,7 +23,7 @@ if xpath_results is a string element, then it's already done
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
-        if not len(xpath_results):
+        if not xpath_results:
             raise Exception('Empty url resultset')
         result = ''
         for e in xpath_results:
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index a4a41ac3b..efdf846ac 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -13,7 +13,7 @@ def request(query, params):
 
 def response(resp):
     raw_search_results = loads(resp.text)
-    if not len(raw_search_results):
+    if not raw_search_results:
         return []
 
     search_results = raw_search_results.get('channels', {})[0].get('items', [])
@@ -26,10 +26,10 @@ def response(resp):
         tmp_result['url'] = result['link']
         tmp_result['content'] = ''
 
-        if len(result['description']):
+        if result['description']:
             tmp_result['content'] += result['description'] + "<br />"
 
-        if len(result['pubDate']):
+        if result['pubDate']:
             tmp_result['content'] += result['pubDate'] + "<br />"
 
         if result['size'] != '-1':
diff --git a/searx/engines/youtube.py b/searx/engines/youtube.py
index 6e4e8859a..5b04f3513 100644
--- a/searx/engines/youtube.py
+++ b/searx/engines/youtube.py
@@ -22,9 +22,10 @@ def response(resp):
     if not 'feed' in search_results:
         return results
     feed = search_results['feed']
+
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-        if not len(url):
+        if not url:
             return
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
@@ -32,12 +33,13 @@ def response(resp):
             url = url[:-1]
         title = result['title']['$t']
         content = ''
-        thumbnail = ''
-        if len(result['media$group']['media$thumbnail']):
+
+        if result['media$group']['media$thumbnail']:
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
             content += '<a href="{0}"><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
-        if len(content):
+
+        if content:
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']
 
diff --git a/searx/search.py b/searx/search.py
index f71adb2aa..7f991045b 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -49,7 +49,7 @@ class Search(object):
 
         self.categories = []
 
-        if len(self.engines):
+        if self.engines:
             self.categories = list(set(engine['category']
                                        for engine in self.engines))
         else:
@@ -59,13 +59,13 @@ class Search(object):
                 if not category in categories:
                     continue
                 self.categories.append(category)
-            if not len(self.categories):
+            if not self.categories:
                 cookie_categories = request.cookies.get('categories', '')
                 cookie_categories = cookie_categories.split(',')
                 for ccateg in cookie_categories:
                     if ccateg in categories:
                         self.categories.append(ccateg)
-            if not len(self.categories):
+            if not self.categories:
                 self.categories = ['general']
 
         for categ in self.categories:
diff --git a/searx/webapp.py b/searx/webapp.py
index 84d9c3f03..7477e1722 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -91,7 +91,7 @@ def render(template_name, **kwargs):
         for ccateg in cookie_categories:
             if ccateg in categories:
                 kwargs['selected_categories'].append(ccateg)
-        if not len(kwargs['selected_categories']):
+        if not kwargs['selected_categories']:
             kwargs['selected_categories'] = ['general']
     return render_template(template_name, **kwargs)
 
@@ -150,12 +150,12 @@ def index():
     elif search.request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
-        if len(search.results):
+        if search.results:
             csv.writerow(keys)
             for row in search.results:
                 row['host'] = row['parsed_url'].netloc
                 csv.writerow([row.get(key, '') for key in keys])
-            csv.stream.seek(0)
+        csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
         response.headers.add('Content-Disposition', cont_disp)
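All of these hunks apply the same PEP 8 idiom: empty sequences and mappings are falsy in Python, so a bare truth test replaces the len() call with identical behaviour for lists, strings, and dicts. A minimal sketch of the pattern, using illustrative values that are not taken from the searx code:

    # An empty container is falsy, so len() adds nothing to the test.
    results = []
    if not results:            # same branch taken as `if not len(results):`
        print('no results')

    description = 'a result description'
    if description:            # same branch taken as `if len(description):`
        print(description)

    # Caveat when applying the rewrite mechanically: None and 0 are falsy
    # too. `if not len(x):` raises TypeError when x is None, whereas
    # `if not x:` silently treats None as empty.
    maybe_missing = None
    if not maybe_missing:
        print('treated as empty, no TypeError')

The caveat appears harmless here, since every rewritten check guards a value that is always a list or a string.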