# libc_database's built-in ./get updater is too slow. This scraper downloads
# .so files from https://libc.blukat.me/?q=_rtld_global%3A0; add the downloaded
# files to the database with: ./add *.so
#!C:\Python3.7
# -*- coding:utf-8 -*-
from selenium import webdriver
import requests
from fake_useragent import UserAgent
import os
def libc_spider(url):
    """Scrape a libc.blukat.me search-result page and download every listed
    libc .so file into ./libc.

    :param url: search-result page URL whose panel lists libc build names.
    :raises SystemExit: with a non-zero code if scraping fails.
    """
    basepath = os.getcwd()
    path = os.path.join(basepath, 'libc')
    # makedirs(exist_ok=True) avoids the race between exists() and mkdir().
    os.makedirs(path, exist_ok=True)
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    # Raw string: '\P', '\G', '\A', '\C' are not valid escape sequences and
    # the original only worked because Python leaves unknown escapes intact.
    drive_path = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver'
    driver = webdriver.Chrome(executable_path=drive_path, chrome_options=option)
    baseurl = "https://libc.blukat.me/d/"
    try:
        driver.get(url)
        elements = driver.find_elements_by_xpath('//div[@class="panel-body"]/div/a')
        for element in elements:
            filename = element.text + ".so"
            # Use a fresh name: the original shadowed the 'url' parameter,
            # which is fragile if the parameter is needed again later.
            file_url = baseurl + filename
            file_path = os.path.join(path, filename)
            down_libc(file_url, file_path, filename)
        print("down load file directory:", path)
    except Exception as e:
        print(e)
        # Original called exit(0), reporting *success* on failure.
        raise SystemExit(1)
    finally:
        # quit() shuts down the browser AND the chromedriver process;
        # close() only closes the current window and can leak the driver.
        driver.quit()
def down_libc(url, file_path, filename):
    """Download one libc binary and write it to disk.

    :param url: direct download URL of the .so file.
    :param file_path: destination path for the downloaded bytes.
    :param filename: display name printed in the progress message.
    """
    try:
        # The original called req.close() in `finally`, which raised a
        # NameError whenever requests.get() itself failed ('req' was never
        # bound). The context manager releases the connection in all cases.
        with requests.get(url) as req:
            with open(file_path, 'wb') as fw:
                fw.write(req.content)
        print("download:", filename)
    except Exception as e:
        print(e)
def main():
    """Entry point: scrape the default search page and download all hits."""
    # Search page listing every libc build (no symbol constraint in practice:
    # _rtld_global:0 matches all databases on the site).
    target_page = "https://libc.blukat.me/?q=_rtld_global%3A0"
    libc_spider(target_page)
    # Example of downloading a single known build directly:
    # so_url ='https://libc.blukat.me/d/libc6-amd64_2.10.1-0ubuntu15_i386.so'
    # down_libc(so_url)
# Run the downloader only when executed as a script, not when imported.
if __name__ == '__main__':
    main()