cr suggestions

Daniel Kukula 2024-05-07 07:11:30 +01:00
parent baf5a446ce
commit 8a400c8969
4 changed files with 30 additions and 29 deletions

File 1 of 4: packages result-template documentation

@@ -556,10 +556,6 @@ type.
      - :py:class:`str`
      - the web location of a license copy
-   * - homepage
-     - :py:class:`str`
-     - the url of the project's homepage
-   * - source_code_url
-     - :py:class:`str`
-     - the location of the project's source code
    * - links
      - :py:class:`dict`
      - a dictionary of link name to link url, e.g. {"homepage": "http://example.com"}
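If the removed homepage and source_code_url rows are now expressed through the links dict, a result for the packages.html template would carry them as plain entries. A minimal illustrative sketch (all values invented):

    result = {
        "template": "packages.html",
        "title": "example-package",
        "package_name": "example-package",
        "links": {
            "homepage": "http://example.com",
            "source_code_url": "https://github.com/example/example-package",
        },
    }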

File 2 of 4: hex.pm engine

@@ -33,7 +33,7 @@ def response(resp):
     results = []
     for package in resp.json():
         meta = package["meta"]
-        published_date = package.get("inserted_at")
+        published_date = package.get("updated_at")
         published_date = parser.parse(published_date)
         links = {"documentation_url": package["docs_html_url"]}
         links = {**links, **meta.get("links", {})}
@@ -46,7 +46,7 @@ def response(resp):
                 "content": meta.get("description", ""),
                 "version": meta.get("latest_version"),
                 "maintainer": ", ".join(meta.get("maintainers", [])),
-                "published_date": published_date,
+                "publishedDate": published_date,
                 "license_name": ", ".join(meta.get("licenses", [])),
                 "links": links,
             }
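The response() change above reads the package timestamp from updated_at instead of inserted_at, presumably so the shown date tracks the latest release rather than the first publish. A hypothetical, trimmed payload of the shape this parser expects (field values invented for illustration):

    package = {
        "updated_at": "2024-05-01T12:00:00Z",
        "docs_html_url": "https://hexdocs.pm/example",
        "meta": {
            "description": "An example package",
            "latest_version": "1.0.0",
            "maintainers": ["jane"],
            "licenses": ["MIT"],
            "links": {"GitHub": "https://github.com/example/example"},
        },
    }
    # the documentation link is seeded first, then registry-provided links merge in
    links = {"documentation_url": package["docs_html_url"]}
    links = {**links, **package["meta"].get("links", {})}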

File 3 of 4: npms.io engine

@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""npms.io"""
+"""npms.io
+"""
 
 from urllib.parse import urlencode
+
 from dateutil import parser
@@ -14,7 +16,7 @@ about = {
     "results": "JSON",
 }
 
-categories = ["it", "packages"]
+categories = ['it', 'packages']
 
 # engine dependent config
@@ -24,14 +26,15 @@ search_api = "https://api.npms.io/v2/search?"
 
 
 def request(query: str, params):
     args = urlencode(
         {
-            "from": (params["pageno"] - 1) * page_size,
-            "q": query,
-            "size": page_size,
+            'from': (params["pageno"] - 1) * page_size,
+            'q': query,
+            'size': page_size,
         }
     )
-    params["url"] = search_api + args
+    params['url'] = search_api + args
 
     return params
@@ -40,9 +43,9 @@ def response(resp):
     content = resp.json()
     for entry in content["results"]:
         package = entry["package"]
-        published_date = package.get("date")
-        if published_date:
-            published_date = parser.parse(published_date)
+        publishedDate = package.get("date")
+        if publishedDate:
+            publishedDate = parser.parse(publishedDate)
         tags = list(entry.get("flags", {}).keys()) + package.get("keywords", [])
         links = {
             "homepage": package["links"].get("homepage"),
@@ -53,11 +56,11 @@ def response(resp):
                 "template": "packages.html",
                 "url": package["links"]["npm"],
                 "title": package["name"],
-                "package_name": package["name"],
+                'package_name': package["name"],
                 "content": package.get("description", ""),
                 "version": package.get("version"),
                 "maintainer": package.get("author", {}).get("name"),
-                "published_date": published_date,
+                "publishedDate": publishedDate,
                 "tags": tags,
                 "links": links,
             }
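The published_date to publishedDate rename matters because publishedDate appears to be the result key the surrounding codebase reads; the value itself is still a datetime from dateutil. A small sketch of the guard-then-parse pattern used above (input string invented):

    from dateutil import parser

    publishedDate = {"date": "2024-05-07T07:11:30.000Z"}.get("date")  # may be None for some packages
    if publishedDate:
        publishedDate = parser.parse(publishedDate)  # -> datetime.datetime(2024, 5, 7, ...)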

File 4 of 4: pypi.org engine

@@ -1,5 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""pypi.org"""
+"""pypi.org
+"""
 
 from urllib.parse import urlencode
+
 from dateutil import parser
@@ -21,21 +23,21 @@ about = {
     "results": "HTML",
 }
 
-categories = ["it", "packages"]
+categories = ['it', 'packages']
 
 # engine dependent config
 first_page_num = 1
 base_url = "https://pypi.org"
-search_url = base_url + "/search/?{query}"
+search_url = base_url + '/search/?{query}'
 
 
 def request(query, params):
     args = {
         "q": query,
-        "page": params["pageno"],
+        "page": params['pageno'],
     }
-    params["url"] = search_url.format(query=urlencode(args))
+    params['url'] = search_url.format(query=urlencode(args))
 
     return params
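Unlike the npms engine, request() here passes the page number straight through instead of computing an offset; e.g. page 3 of a search for "requests":

    from urllib.parse import urlencode

    base_url = "https://pypi.org"
    search_url = base_url + '/search/?{query}'
    params = {"pageno": 3}
    print(search_url.format(query=urlencode({"q": "requests", "page": params['pageno']})))
    # https://pypi.org/search/?q=requests&page=3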
@@ -43,22 +45,22 @@ def response(resp):
     results = []
 
     dom = html.fromstring(resp.text)
     for entry in eval_xpath_list(dom, '/html/body/main/div/div/div/form/div/ul/li/a[@class="package-snippet"]'):
-        url = base_url + extract_text(eval_xpath_getindex(entry, "./@href", 0))  # type: ignore
+        url = base_url + extract_text(eval_xpath_getindex(entry, './@href', 0))  # type: ignore
         title = extract_text(eval_xpath_getindex(entry, './h3/span[@class="package-snippet__name"]', 0))
         version = extract_text(eval_xpath_getindex(entry, './h3/span[@class="package-snippet__version"]', 0))
         created_at = extract_text(
             eval_xpath_getindex(entry, './h3/span[@class="package-snippet__created"]/time/@datetime', 0)
         )
-        content = extract_text(eval_xpath_getindex(entry, "./p", 0))
+        content = extract_text(eval_xpath_getindex(entry, './p', 0))
         results.append(
             {
                 "template": "packages.html",
                 "url": url,
                 "title": title,
-                "package_name": title,
+                'package_name': title,
                 "content": content,
                 "version": version,
-                "published_date": parser.parse(created_at),  # type: ignore
+                'publishedDate': parser.parse(created_at),  # type: ignore
             }
         )
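Since pypi.org is scraped as HTML rather than queried as JSON, this parser hinges on the package-snippet XPaths above. A self-contained sketch of the same extraction with plain lxml, run against a hypothetical trimmed snippet (markup invented to match the classes the engine targets):

    from lxml import html

    snippet = (
        '<a class="package-snippet" href="/project/example/">'
        '<h3><span class="package-snippet__name">example</span>'
        '<span class="package-snippet__version">1.0.0</span>'
        '<span class="package-snippet__created">'
        '<time datetime="2024-05-07T00:00:00+0000">May 7, 2024</time></span></h3>'
        '<p>An example package.</p></a>'
    )
    entry = html.fromstring(snippet)  # a single-element fragment parses to the <a> element itself
    url = "https://pypi.org" + entry.xpath('./@href')[0]
    title = entry.xpath('./h3/span[@class="package-snippet__name"]/text()')[0]
    created_at = entry.xpath('./h3/span[@class="package-snippet__created"]/time/@datetime')[0]
    print(url, title, created_at)  # https://pypi.org/project/example/ example 2024-05-07T00:00:00+0000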