Compare commits

No commits in common. "9155bf7a001c4f8c505a83a949ad30c8afc9cb20" and "a329bd41ad07feb5b073447aa613d9521ca6fc11" have entirely different histories.

7 changed files with 31 additions and 89 deletions

@@ -1,26 +0,0 @@
name: Detection test

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.8
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          # If your tests need other dependencies, add pip install commands here
      - name: Run tests
        run: |
          python -m unittest discover -s tests
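For quick iteration outside CI, the workflow's install-and-test steps can be reproduced locally. A minimal sketch, assuming it is run from the repository root, which (as the workflow implies) contains a tests/ directory:

import subprocess
import sys

# Mirror the workflow's "Install dependencies" and "Run tests" steps locally.
subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "pip"], check=True)
subprocess.run([sys.executable, "-m", "pip", "install", "pytest"], check=True)
subprocess.run([sys.executable, "-m", "unittest", "discover", "-s", "tests"], check=True)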

@@ -1,5 +1,5 @@
 """
-Usage: python backdoor_detection.py your_file_path
+Usage: python match.py your_file_path
 """
 import re
@@ -30,7 +30,6 @@ def read_file_content(file_path: str) -> str:
 def find_dangerous_functions(file_content: str) -> Dict[str, List[Tuple[int, str]]]:
     """
     Searches the given code text for potentially dangerous function calls and classifies results by risk level.
-    Ignores comments in the code.
     :param file_content: String content of the code file.
     :return: Dictionary with risk levels as keys and lists of tuples (line number, matched line content) as values.
@@ -46,14 +45,10 @@ def find_dangerous_functions(file_content: str) -> Dict[str, List[Tuple[int, str
     # Store results classified by risk level
     classified_results = {"high": [], "medium": [], "low": []}
     for line_number, line in enumerate(file_content.split("\n"), start=1):
-        # Remove comments from the line
-        clean_line = line.split("#")[0].strip()
-        if not clean_line:  # Skip empty or comment-only lines
-            continue
         found = False
         for pattern, risk_level in patterns.items():
-            if re.search(pattern, clean_line):
-                classified_results[risk_level].append((line_number, clean_line))
+            if re.search(pattern, line):
+                classified_results[risk_level].append((line_number, line.strip()))
                 found = True
                 break  # Stop checking other patterns once a match is found
     return classified_results
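The net effect of this hunk is that comment handling moves out of the scanner: the old version stripped everything after # and skipped comment-only lines, while the new version matches the raw line. A minimal sketch of the difference, using an assumed two-pattern subset of the real patterns table (which this hunk does not show):

import re
from typing import Dict, List, Tuple

# Assumed subset of the patterns table; the real match.py may define more.
patterns = {r"\beval\(": "high", r"\bsubprocess\.run\(": "medium"}

def scan(content: str, strip_comments: bool) -> Dict[str, List[Tuple[int, str]]]:
    results = {"high": [], "medium": [], "low": []}
    for line_number, line in enumerate(content.split("\n"), start=1):
        # Old behavior: drop the comment part and skip comment-only lines.
        target = line.split("#")[0].strip() if strip_comments else line
        if strip_comments and not target:
            continue
        for pattern, risk_level in patterns.items():
            if re.search(pattern, target):
                results[risk_level].append((line_number, target.strip()))
                break
    return results

sample = "x = 1  # eval('2 + 2') lives only in a comment"
print(scan(sample, strip_comments=True))   # old behavior: nothing flagged
print(scan(sample, strip_comments=False))  # new behavior: line 1 flagged as high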

@@ -0,0 +1,28 @@
"""
Dangerous-function test fixture
"""
import os

# Examples of potentially dangerous function calls
os.system("ls")
eval("2 + 2")
exec("print('Executing dangerous exec function')")
popen_result = os.popen('echo "Hello World"').read()
print(popen_result)

# Some normal operations
print("This is a safe print statement.")
result = sum([1, 2, 3])
print("Sum result:", result)

# Try using subprocess to invoke external commands in a safer way
import subprocess
subprocess.run(["echo", "Subprocess run is safer than os.system"])

# A deliberately faulty function call attempt
try:
    os.system("rm -rf /")  # Extremely dangerous call; avoid using it in a real environment
except:
    print("Failed to execute dangerous system call.")

@@ -1,55 +0,0 @@
import unittest

from detection.backdoor_detection import find_dangerous_functions


class TestBackdoorDetection(unittest.TestCase):
    def test_high_risk_detection(self):
        content = """import os
os.system('rm -rf /') # high risk
exec('print("Hello")') # high risk
eval('2 + 2') # high risk
"""
        results = find_dangerous_functions(content)
        self.assertIn((2, "os.system('rm -rf /')"), results["high"])
        self.assertIn((3, "exec('print(\"Hello\")')"), results["high"])
        self.assertIn((4, "eval('2 + 2')"), results["high"])

    def test_medium_risk_detection(self):
        content = """import subprocess
subprocess.run(['ls', '-l']) # medium risk
import os
os.popen('ls') # medium risk
"""
        results = find_dangerous_functions(content)
        self.assertIn((2, "subprocess.run(['ls', '-l'])"), results["medium"])
        self.assertIn((4, "os.popen('ls')"), results["medium"])

    def test_no_risk_detection(self):
        content = """a = 10
b = a + 5
print('This should not be detected as risky.')
"""
        results = find_dangerous_functions(content)
        self.assertEqual(len(results["high"]), 0)
        self.assertEqual(len(results["medium"]), 0)
        self.assertEqual(len(results["low"]), 0)

    def test_inclusion_of_comments(self):
        content = """# Just a comment line
print('This is a safe line')
eval('2 + 2') # This should be high risk
subprocess.run(['echo', 'hello']) # This should be medium risk
"""
        results = find_dangerous_functions(content)
        self.assertIn(
            (3, "eval('2 + 2')"),
            results["high"],
        )
        self.assertIn(
            (4, "subprocess.run(['echo', 'hello'])"),
            results["medium"],
        )


if __name__ == "__main__":
    unittest.main()
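These deleted tests pin down the risk table the old detector must have used: os.system, exec and eval are asserted as high risk, subprocess.run and os.popen as medium. A sketch of a patterns dict consistent with those assertions (an inference from the tests, not the actual source):

import re

# Inferred from the deleted assertions; the real module may have differed.
patterns = {
    r"\bos\.system\(": "high",
    r"\bexec\(": "high",
    r"\beval\(": "high",
    r"\bsubprocess\.run\(": "medium",
    r"\bos\.popen\(": "medium",
}

line = "os.popen('ls')"
for pattern, risk_level in patterns.items():
    if re.search(pattern, line):
        print(risk_level, line)  # prints: medium os.popen('ls')
        break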