141 lines
4.4 KiB
Python
141 lines
4.4 KiB
Python
import argparse
|
||
import requests
|
||
from bs4 import BeautifulSoup
|
||
from packaging.version import Version, InvalidVersion
|
||
import sys
|
||
|
||
|
||
def fetch_html(url: str, timeout: float = 10.0) -> str:
    """Download *url* and return the response body as text.

    Args:
        url: Fully-qualified URL to fetch.
        timeout: Seconds to wait for the server before giving up. The
            original call had no timeout, so a stalled server would hang
            the whole scan indefinitely.

    Returns:
        The response body, or "" when the request fails for any reason
        (connection error, timeout, or non-2xx status); the error is
        printed rather than raised so one bad package does not abort
        the scan.
    """
    try:
        response = requests.get(url, timeout=timeout)
        # Promote 4xx/5xx statuses to exceptions so they are reported
        # through the same path as transport errors.
        response.raise_for_status()
        return response.text
    except requests.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return ""
|
||
|
||
|
||
def parse_html(html: str) -> list:
    """Extract vulnerability rows from a Snyk package page.

    Looks for the sortable results table and pulls, from each row, the
    vulnerability title text and the affected-version range badge.

    Args:
        html: Raw HTML of the package's Snyk page.

    Returns:
        A list of dicts with keys "link" (title text) and "chip"
        (version-range text); an empty list when the table is absent.
    """
    soup = BeautifulSoup(html, "html.parser")
    table = soup.find("table", id="sortable-table")
    if table is None:
        return []

    extracted = []
    for row in table.find_all("tr", class_="vue--table__row"):
        anchor = row.find("a")
        badge = row.find("span", class_="vue--chip__value")
        # Rows missing either cell (headers, spacers) are skipped.
        if anchor and badge:
            extracted.append(
                {
                    "link": anchor.get_text(strip=True),
                    "chip": badge.get_text(strip=True),
                }
            )
    return extracted
|
||
|
||
|
||
def load_requirements(file_path: str) -> list:
    """Read a pip requirements file into a list of requirement strings.

    Blank lines and ``#`` comment lines are skipped; every remaining
    line is returned stripped of surrounding whitespace.

    Args:
        file_path: Path to the requirements file.

    Returns:
        Requirement lines such as ``"requests==2.31.0"`` or ``"flask"``.

    Exits:
        Terminates the process with status 1 when the file is missing.
    """
    requirements = []
    try:
        # Explicit encoding: requirements files are conventionally UTF-8,
        # and the platform-default codec (e.g. cp1252 on Windows) can
        # corrupt or reject them.
        with open(file_path, "r", encoding="utf-8") as file:
            for line in file:
                line = line.strip()
                if line and not line.startswith("#"):
                    requirements.append(line)
    except FileNotFoundError:
        print(f"Error: File {file_path} not found.")
        sys.exit(1)
    return requirements
|
||
|
||
|
||
def version_in_range(version, range_str: str) -> bool:
    """Return True when *version* falls inside a Snyk-style range string.

    Range strings use interval notation such as ``"[1.0,2.0)"`` or
    ``"[0,)"``: ``[``/``]`` are inclusive bounds, ``(``/``)`` exclusive
    ones, and an empty side means unbounded.

    Args:
        version: Version string from the requirement, or None when the
            requirement is unpinned (interpreted as "latest release").
        range_str: Affected-version range scraped from the page.

    Returns:
        True when the (possibly assumed-latest) version is affected;
        False for unparseable versions or bounds.

    Fixes two bugs in the previous implementation:
      * With ``version is None`` and a bounded range, control fell
        through to the loop with ``v`` unbound, raising NameError
        (and ``range_str[-2]`` could raise IndexError).
      * Each part was ``.strip("[]()")``-ed *before* the bracket checks,
        so the bound comparisons were dead code and every parseable
        version was reported as affected.
    """
    if version is None:
        # No pinned version: assume the newest release.  The latest
        # release can only be inside the range when the range has no
        # upper bound, i.e. it ends with ",)" or ",]".
        return len(range_str) >= 2 and range_str[-2] == ","

    try:
        v = Version(version)
    except InvalidVersion:
        return False

    for part in range_str.split(","):
        part = part.strip()
        core = part.strip("[]()")
        if not core:
            # Bare bracket, e.g. the ")" in "[1.0,)": unbounded side.
            continue
        try:
            bound = Version(core)
        except InvalidVersion:
            return False
        # Check the bracket on the *original* part to decide which bound
        # this is and whether it is inclusive.
        if part.endswith(")") and v >= bound:
            return False
        if part.endswith("]") and v > bound:
            return False
        if part.startswith("[") and v < bound:
            return False
        if part.startswith("(") and v <= bound:
            return False
    return True
|
||
|
||
|
||
def check_vulnerabilities(requirements: list, base_url: str, output_file: str):
    """Scan each requirement against the Snyk vulnerability pages.

    For every requirement, fetch its Snyk page, parse the vulnerability
    table, keep the entries whose affected-version range matches the
    pinned version (or "latest" when unpinned), and write them to the
    report file.

    Args:
        requirements: Requirement strings such as ``"requests==2.31.0"``
            or just ``"requests"``.
        base_url: URL prefix the package name is appended to.
        output_file: Path of the plain-text report to write.

    Side effects:
        Writes the report to *output_file*; prints progress and
        per-package errors to stdout.
    """
    with open(output_file, "w", encoding="utf-8") as out_file:
        for req in requirements:
            # Split "name==version"; maxsplit=1 keeps a stray second "=="
            # from raising ValueError.  Unpinned requirements get
            # version=None so version_in_range() assumes "latest".
            if "==" in req:
                package_name, version = req.split("==", 1)
                package_name = package_name.strip()
                version = version.strip()
            else:
                package_name, version = req.strip(), None

            # Build the package page URL.
            url = f"{base_url}{package_name}"
            print(f"Fetching data for {package_name} from {url}")
            html_content = fetch_html(url)
            if not html_content:
                print(f"Failed to fetch data for {package_name}.")
                continue

            # Parse the HTML for vulnerability rows.
            extracted_data = parse_html(html_content)
            if not extracted_data:
                print(f"No relevant data found for {package_name}.")
                continue

            relevant_vulns = [
                vuln
                for vuln in extracted_data
                if version_in_range(version, vuln["chip"])
            ]
            if relevant_vulns:
                out_file.write(f"Vulnerabilities found for {package_name}:\n")
                for vuln in relevant_vulns:
                    out_file.write(f"  - {vuln['link']}\n")
                out_file.write("\n")
|
||
|
||
|
||
def main():
    """CLI entry point: parse arguments, load dependencies, run the scan."""
    parser = argparse.ArgumentParser(
        description="Check project dependencies for vulnerabilities."
    )
    parser.add_argument(
        "-r",
        "--requirement",
        required=True,
        help="Path to the requirements file of the project",
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="Output file path with extension, e.g., './output/report.txt'",
    )
    args = parser.parse_args()

    base_url = "https://security.snyk.io/package/pip/"
    # Parse the project's dependencies: names plus optional pinned versions.
    requirements = load_requirements(args.requirement)
    # Run the scan with the dependency list, URL prefix, and report path.
    check_vulnerabilities(requirements, base_url, args.output)
    print("Vulnerability scan complete. Results saved to", args.output)
|
||
|
||
|
||
# Run the CLI only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
|