Compare commits

5 Commits

Author  SHA1        Message                                              Date
dqy     9155bf7a00  Merge branch 'feature/match' of https://git.mamahaha.work/sangge/BackDoorBuster into feature/match  2024-04-20 11:13:44 +08:00
                    (checks failed: Detection test / build (pull_request), failing after 44s)
dqy     8c3616e90f  feat: add unittest tests                             2024-04-20 11:13:37 +08:00
dqy     8dc486cf47  fix: rename file                                     2024-04-20 11:12:56 +08:00
dqy     9e5640ad80  ci: add Actions automated testing                    2024-04-20 11:12:28 +08:00
dqy     3d961aa2d7  fix: ensure test files import modules correctly      2024-04-20 11:12:04 +08:00
7 changed files with 89 additions and 31 deletions

.github/workflows/detection-test.yml (new file, vendored, +26)

@@ -0,0 +1,26 @@
name: Detection test

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.8
        uses: actions/setup-python@v1
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          # If your tests need other dependencies, add pip install commands here
      - name: Run tests
        run: |
          python -m unittest discover -s tests
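
The "Run tests" step can be mirrored locally. A minimal sketch, assuming it is run from the repository root so that the detection and tests packages are importable:

import unittest

# Discover and run everything under tests/, mirroring the CI step
# `python -m unittest discover -s tests`.
suite = unittest.defaultTestLoader.discover(start_dir="tests")
unittest.TextTestRunner(verbosity=2).run(suite)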

__init__.py (new empty file)

detection/__init__.py (new empty file)

detection/backdoor_detection.py (renamed from match.py, +8/-3)

@@ -1,5 +1,5 @@
 """
-Usage: python match.py your_file_path
+Usage: python backdoor_detection.py your_file_path
 """
 import re
@@ -30,6 +30,7 @@ def read_file_content(file_path: str) -> str:
 def find_dangerous_functions(file_content: str) -> Dict[str, List[Tuple[int, str]]]:
     """
     Searches the given code text for potentially dangerous function calls and classifies results by risk level.
+    Ignores comments in the code.
     :param file_content: String content of the code file.
     :return: Dictionary with risk levels as keys and lists of tuples (line number, matched line content) as values.
@@ -45,10 +46,14 @@ def find_dangerous_functions(file_content: str) -> Dict[str, List[Tuple[int, str
     # Store results classified by risk level
     classified_results = {"high": [], "medium": [], "low": []}
     for line_number, line in enumerate(file_content.split("\n"), start=1):
+        # Remove comments from the line
+        clean_line = line.split("#")[0].strip()
+        if not clean_line:  # Skip empty or comment-only lines
+            continue
         found = False
         for pattern, risk_level in patterns.items():
-            if re.search(pattern, line):
-                classified_results[risk_level].append((line_number, line.strip()))
+            if re.search(pattern, clean_line):
+                classified_results[risk_level].append((line_number, clean_line))
                 found = True
                 break  # Stop checking other patterns once a match is found
     return classified_results
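
For reference, a minimal usage sketch showing how the updated function treats comments. The expected output is inferred from the unit tests added in this PR; the patterns dictionary itself is outside this hunk:

from detection.backdoor_detection import find_dangerous_functions

sample = """import os
os.system('ls')  # the inline comment is stripped before matching
# eval('2 + 2') sits on a comment-only line and is skipped entirely
"""

# Expected, based on the tests in this PR (os.system is high risk):
# {'high': [(2, "os.system('ls')")], 'medium': [], 'low': []}
print(find_dangerous_functions(sample))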

Deleted file (-28)

@ -1,28 +0,0 @@
"""
危险函数测试
"""
import os
# 潜在的危险函数调用示例
os.system("ls")
eval("2 + 2")
exec("print('Executing dangerous exec function')")
popen_result = os.popen('echo "Hello World"').read()
print(popen_result)
# 一些正常操作
print("This is a safe print statement.")
result = sum([1, 2, 3])
print("Sum result:", result)
# 尝试使用 subprocess 以更安全的方式调用外部命令
import subprocess
subprocess.run(["echo", "Subprocess run is safer than os.system"])
# 错误的函数调用尝试
try:
os.system("rm -rf /") # 非常危险的调用,应避免在实际环境中使用
except:
print("Failed to execute dangerous system call.")

tests/__init__.py (new empty file)

New test file under tests/ (+55)

@@ -0,0 +1,55 @@
import unittest

from detection.backdoor_detection import find_dangerous_functions


class TestBackdoorDetection(unittest.TestCase):
    def test_high_risk_detection(self):
        content = """import os
os.system('rm -rf /')  # high risk
exec('print("Hello")')  # high risk
eval('2 + 2')  # high risk
"""
        results = find_dangerous_functions(content)
        self.assertIn((2, "os.system('rm -rf /')"), results["high"])
        self.assertIn((3, "exec('print(\"Hello\")')"), results["high"])
        self.assertIn((4, "eval('2 + 2')"), results["high"])

    def test_medium_risk_detection(self):
        content = """import subprocess
subprocess.run(['ls', '-l'])  # medium risk
import os
os.popen('ls')  # medium risk
"""
        results = find_dangerous_functions(content)
        self.assertIn((2, "subprocess.run(['ls', '-l'])"), results["medium"])
        self.assertIn((4, "os.popen('ls')"), results["medium"])

    def test_no_risk_detection(self):
        content = """a = 10
b = a + 5
print('This should not be detected as risky.')
"""
        results = find_dangerous_functions(content)
        self.assertEqual(len(results["high"]), 0)
        self.assertEqual(len(results["medium"]), 0)
        self.assertEqual(len(results["low"]), 0)

    def test_inclusion_of_comments(self):
        content = """# Just a comment line
print('This is a safe line')
eval('2 + 2')  # This should be high risk
subprocess.run(['echo', 'hello'])  # This should be medium risk
"""
        results = find_dangerous_functions(content)
        self.assertIn(
            (3, "eval('2 + 2')"),
            results["high"],
        )
        self.assertIn(
            (4, "subprocess.run(['echo', 'hello'])"),
            results["medium"],
        )


if __name__ == "__main__":
    unittest.main()
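
One caveat the new tests do not cover: stripping comments with line.split("#")[0] also truncates lines where "#" occurs inside a string literal, so such a line is matched against a cut-off version of itself. A quick illustration:

# Naive comment stripping, as used in find_dangerous_functions:
line = "os.system('echo #comment-like')"
clean_line = line.split("#")[0].strip()
print(clean_line)  # prints os.system('echo  (the string literal is cut short)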