企业如何自动化收集域名资产
0xNvyao,公众号:安全随笔企业如何自动化收集域名资产
上一篇讲到如何自动化通过 DNS 托管平台的 API 接口自动化收集域名资产信息,本篇接着说收集好域名资产后如何扫描发现运维违规对外的域名。
扫描工具选择的是Httpx:
Httpx is a fast and multi-purpose HTTP toolkit
https://github.com/projectdiscovery/httpx
这款工具功能很强大,而且参数很丰富,我列了下我使用到的:
httpx -u ceye.io -title -sc -ct -rt -bp -server -cdn -json
-title:显示title标题
-sc:显示http响应码status-code
-ct:显示content-length
-rt:显示response time
-bp:display first N characters of response body (default 100)
-server:显示server name
-cdn:显示是否使用waf/cdn
-json:json格式结果
扫描脚本HttpxScan.py
# -*- coding: utf-8 -*-
import requests
import subprocess
import json
import datetime
import logging
import configparser
# Configure logging: all scanner activity is appended to ./log/scanner.log
logging.basicConfig(filename='./log/scanner.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Read Splunk HEC credentials and endpoint URL from the local config file.
# Expected sections: [Splunk] Authorization=..., [API] url=...
config = configparser.ConfigParser()
config.read('./conf/config.ini')
splunk_authorization = config.get('Splunk', 'Authorization')
splunk_url = config.get('API', 'url')
# Load the subdomain list produced by the collection script (main.py).
def get_subdomains_from_file(file_path):
    """Return the subdomains listed in *file_path*, one domain per line.

    Surrounding whitespace is stripped, and blank lines are skipped so
    that an empty string is never handed to the scanner.

    Parameters:
        file_path: path of a text file with one domain per line.

    Returns:
        list[str] of non-empty domain names, in file order.

    Raises:
        OSError: if the file cannot be opened.
    """
    with open(file_path, 'r') as file:
        return [line.strip() for line in file if line.strip()]
def _failure_record(subdomain):
    """Build the JSON record submitted when a scan produced no output or timed out."""
    # Shape matches httpx's own failure output, e.g.:
    # {"timestamp": "...", "input": "www.xxxx.com", "failed": true}
    return {
        "timestamp": datetime.datetime.now().astimezone().isoformat(),
        "input": subdomain,
        "log_source": "httpxscan",
        "failed": True,
    }

def scan_domains(subdomains):
    """Probe each subdomain with httpx and forward the result to Splunk.

    For every domain, runs the httpx binary with -json output. A successful
    probe is tagged with log_source=httpxscan and submitted; an empty result
    or a timeout is submitted as a failure record so the domain still shows
    up in Splunk.

    Parameters:
        subdomains: iterable of domain-name strings.
    """
    for subdomain in subdomains:
        command = ["/root/httpx/httpx", "-u", subdomain, "-title", "-sc", "-ct",
                   "-rt", "-bp", "-server", "-cdn", "-json"]
        try:
            # stdin=DEVNULL keeps httpx from waiting on piped input;
            # 5s timeout bounds each probe.
            process = subprocess.run(command, capture_output=True, text=True,
                                     check=True, timeout=5, stdin=subprocess.DEVNULL)
            logging.info("The httpx command is executed successfully! %s", subdomain)
            stdout = process.stdout.strip()
            if stdout:
                data = json.loads(stdout)  # one JSON object per scanned target
                data['log_source'] = 'httpxscan'  # tag the event source for Splunk
                submit_data(data)
            else:
                # httpx exited cleanly but printed nothing -> record as failed
                submit_data(_failure_record(subdomain))
        except subprocess.TimeoutExpired:
            submit_data(_failure_record(subdomain))
        except subprocess.CalledProcessError as e:
            # was print(); log so the error lands in scanner.log with the rest
            logging.error("Error occurred while scanning %s: %s", subdomain, e)
def submit_data(data):
    """POST one scan result to the Splunk HTTP Event Collector.

    Parameters:
        data: dict event payload; must contain an "input" key (the scanned
            domain), used for the success log line.

    All failures are logged, never raised, so one bad event does not stop
    the scan loop.
    """
    headers = {"Content-Type": "application/json",
               "Authorization": splunk_authorization}
    try:
        # HEC expects the event itself as a string inside the envelope.
        payload = {
            "event": json.dumps(data),
            "index": "test"
        }
        response = requests.post(splunk_url, data=json.dumps(payload),
                                 headers=headers, timeout=10)
        response.raise_for_status()
        logging.info("Data submitted successfully! %s", data["input"])
    except requests.exceptions.RequestException as e:
        logging.error("Failed to submit data: %s", str(e))
    except TypeError as e:
        # json.dumps raises TypeError (not JSONDecodeError) on unserializable
        # values, so the dumps calls live inside the try and we catch TypeError.
        logging.error("Failed to convert data to JSON: %s", str(e))
def main():
    """Entry point: read today's subdomain list and scan every entry."""
    today = datetime.datetime.today().strftime("%Y%m%d")
    # Path of the domain list written by the collection script (one per line)
    subdomain_file = f'/root/DomainScan/log/subdomains_{today}.txt'
    scan_domains(get_subdomains_from_file(subdomain_file))
if __name__ == "__main__":
    main()
上一篇的代码也需要做些调整,我通过将上一篇识别到的域名资产落到本地文件中,然后上面Httpx脚本读取文件来扫描:
入口脚本main.py
# -*- coding: utf-8 -*-
import logging
from FofaScan import FofaScan
from CloudFlareScan import CloudflareScanner
from ConfigReader import ConfigReader
from AWSScan import AWSScan
from ComlaudeScan import ComlaudeScan
import datetime
def main():
    """Collect subdomains from every DNS provider and append them to today's file.

    Runs the Cloudflare, AWS, Comlaude and Fofa scanners in sequence; each
    scanner writes the domains it finds (one per line) into
    ./log/subdomains_YYYYMMDD.txt, which HttpxScan.py later consumes.
    """
    # Read the shared config file
    config_file = './conf/config.ini'
    config_reader = ConfigReader(config_file)
    # Output file is named after today's date
    current_date = datetime.date.today().strftime("%Y%m%d")
    output_file = f"./log/subdomains_{current_date}.txt"
    # Log to the same file as the scan script
    logging.basicConfig(filename='./log/scanner.log', level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s')
    # "with" guarantees the file is closed even if a scanner raises
    # (the original open()/close() pair leaked the handle on errors)
    with open(output_file, "a") as file:
        logging.info("创建 CloudflareScanner 实例并执行扫描")
        cfscanner = CloudflareScanner(config_reader, file)
        cfscanner.scanner()
        logging.info("创建 AWSScan 实例并执行扫描")
        awsscanner = AWSScan(config_reader, file)
        awsscanner.search_domain()
        logging.info("创建 ComlaudeScan 实例并执行扫描")
        comlaudeScan = ComlaudeScan(config_reader, file)
        comlaudeScan.scanner()
        # Added the log line the other scanners already had, for consistency
        logging.info("创建 FofaScan 实例并执行扫描")
        fofascanner = FofaScan(config_reader, file)
        fofascanner.scanner()
if __name__ == "__main__":
    main()
CloudFlare文件:
# -*- coding: utf-8 -*-
import requests
import json
import logging
class CloudflareScanner:
    """Collects domain assets via the Cloudflare API and records each subdomain."""
    def __init__(self, config_reader, file):
        # Cloudflare API token and Splunk HEC credentials from config.ini
        self.cf_authorization = config_reader.get_value('cloudflare', 'Authorization')
        self.splunk_authorization = config_reader.get_value('Splunk', 'Authorization')
        self.api_url = config_reader.get_value('API', 'url')
        # Shared output file handle: one subdomain per line
        self.file = file
    # ....
    # other methods omitted in the article
    def submit_data(self, data):
        # ...
        # other code omitted in the article
        # FIX: original wrote subdomain + "n" (a literal "n", the backslash
        # was lost) — a newline is required so HttpxScan reads one domain per line
        self.file.write(subdomain + "\n")
最终splunk收集到数据后,可以简单制作一个告警:
最终效果:
原文始发于微信公众号(安全随笔):企业如何自动扫描对外域名资产
免责声明:文章中涉及的程序(方法)可能带有攻击性,仅供安全研究与教学之用,读者将其信息做其他用途,由读者承担全部法律及连带责任,本站不承担任何法律及连带责任;如有问题可邮件联系(建议使用企业邮箱或有效邮箱,避免邮件被拦截,联系方式见首页),望知悉。
- 左青龙
- 微信扫一扫
-
- 右白虎
- 微信扫一扫
-
评论