For work I have been studying QASystemOnMedicalKG, an open-source medical-domain question-answering project based on a knowledge graph, published on GitHub by 刘焕勇 (Liu Huanyong) of the Institute of Software, Chinese Academy of Sciences. Original project: https://github.com/liuhuanyong/QASystemOnMedicalKG. I set up the environment myself and have so far worked through the crawler part; this post records the process, and comments are welcome.
Following the blog post mongodb安装及创建用户 (MongoDB installation and user creation), download and configure MongoDB as described there and start the service. Then open the service address in a browser (by default http://localhost:27017); if the following message appears, the server has started successfully:
It looks like you are trying to access MongoDB over HTTP on the native driver port.

To make it easier to verify that a database has been created, here are a few commonly used MongoDB shell commands:
- `show dbs`: list the existing databases
- `use db_name`: switch to the database db_name if it exists, or create it if it does not (it only actually appears once data has been written to it)
- `db.dropDatabase()`: drop the current database
- `db.jc.find()`: show the documents in the jc collection of the current database
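You can also confirm from Python that the service is reachable, since the crawler below talks to MongoDB through pymongo anyway. The following is a minimal sketch, assuming MongoDB is listening on the default localhost:27017 without authentication; adjust the connection URI if you created a user as in the referenced blog post.

```python
# Minimal connectivity check, assuming a local MongoDB on the default port 27017
# with no authentication (adjust the URI if your setup differs).
import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017/', serverSelectionTimeoutMS=3000)
print(client.server_info()['version'])    # raises ServerSelectionTimeoutError if the service is not running
print(client.list_database_names())       # same information as `show dbs` in the mongo shell
```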
In the symptom-parsing function, the XPath in the original code returned person names instead of symptoms on some pages, so I modified it; see the code below. After the script has run, the database is created in MongoDB and the crawled data is stored in it:
```python
# -*- coding: utf-8 -*-
import urllib.request
import urllib.parse
from lxml import etree
import pymongo

# Create the database connection. Once the MongoDB service is running, the
# database is created automatically the first time data is written to it.
conn = pymongo.MongoClient()
db = conn['medical']
col = db['data']


def get_html(url):
    '''Fetch a page; the site is GBK-encoded.'''
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
    req = urllib.request.Request(url=url, headers=headers)
    res = urllib.request.urlopen(req)
    html = res.read().decode('gbk')
    return html


def spider_main():
    '''Crawl the disease pages, parse them and store the results.'''
    for page in range(1, 11000):
        try:
            basic_url = 'http://jib.xywy.com/il_sii/gaishu/%s.htm' % page     # overview
            cause_url = 'http://jib.xywy.com/il_sii/cause/%s.htm' % page      # causes
            prevent_url = 'http://jib.xywy.com/il_sii/prevent/%s.htm' % page  # prevention
            symptom_url = 'http://jib.xywy.com/il_sii/symptom/%s.htm' % page  # symptoms
            inspect_url = 'http://jib.xywy.com/il_sii/inspect/%s.htm' % page  # examinations
            treat_url = 'http://jib.xywy.com/il_sii/treat/%s.htm' % page      # treatment
            food_url = 'http://jib.xywy.com/il_sii/food/%s.htm' % page        # diet
            drug_url = 'http://jib.xywy.com/il_sii/drug/%s.htm' % page        # recommended drugs
            data = {}
            data['url'] = basic_url
            data['basic_info'] = basicinfo_spider(basic_url)
            data['cause_info'] = common_spider(cause_url)
            data['prevent_info'] = common_spider(prevent_url)
            data['symptom'] = symptom_spider(symptom_url)
            data['inspect_info'] = inspect_spider(inspect_url)
            data['treat_info'] = treat_spider(treat_url)
            data['food_info'] = food_spider(food_url)
            data['drug_info'] = drug_spider(drug_url)
            print(page, basic_url)
            col.insert_one(data)  # insert() is removed in PyMongo 4; insert_one() also works in 3.x
        except:
            print('error')
    return


def basicinfo_spider(url):
    '''Parse the basic information (overview) page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    title = selector.xpath('//title/text()')[0]
    category = selector.xpath('//div[@class="wrap mt10 nav-bar"]/a/text()')
    desc = selector.xpath('//div[@class="jib-articl-con jib-lh-articl"]/p/text()')
    ps = selector.xpath('//div[@class="mt20 articl-know"]/p')
    infobox = []
    for p in ps:
        info = p.xpath('string(.)').replace('\r', '').replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
        infobox.append(info)
    basic_data = {}
    basic_data['category'] = category
    basic_data['name'] = title.split('的简介')[0]
    basic_data['desc'] = desc
    basic_data['attributes'] = infobox
    return basic_data


def treat_spider(url):
    '''Parse the treatment page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    ps = selector.xpath('//div[starts-with(@class, "mt20 articl-know")]/p')
    infobox = []
    for p in ps:
        info = p.xpath('string(.)').replace('\r', '').replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
        infobox.append(info)
    return infobox


def drug_spider(url):
    '''Parse the recommended-drug page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    drugs = [i.replace('\n', '').replace('\t', '').replace(' ', '')
             for i in selector.xpath('//div[@class="fl drug-pic-rec mr30"]/p/a/text()')]
    return drugs


def food_spider(url):
    '''Parse the diet page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    divs = selector.xpath('//div[@class="diet-img clearfix mt20"]')
    try:
        food_data = {}
        food_data['good'] = divs[0].xpath('./div/p/text()')
        food_data['bad'] = divs[1].xpath('./div/p/text()')
        food_data['recommand'] = divs[2].xpath('./div/p/text()')
    except:
        return {}
    return food_data


def symptom_spider(url):
    '''Parse the symptom page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    # The XPath in the original code picked up person names on some pages, so it was changed here.
    symptoms = selector.xpath('//span[@class="db f12 lh240 mb15 "]/a/text()')
    ps = selector.xpath('//p')
    detail = []
    for p in ps:
        info = p.xpath('string(.)').replace('\r', '').replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
        detail.append(info)
    symptoms_data = {}
    symptoms_data['symptoms'] = symptoms
    symptoms_data['symptoms_detail'] = detail
    return symptoms, detail


def inspect_spider(url):
    '''Collect links to examination items; some pages have them and some do not.'''
    html = get_html(url)
    selector = etree.HTML(html)
    inspects = selector.xpath('//li[@class="check-item"]/a/@href')
    return inspects


def common_spider(url):
    '''Generic parser: join the text of all <p> elements on the page.'''
    html = get_html(url)
    selector = etree.HTML(html)
    ps = selector.xpath('//p')
    infobox = []
    for p in ps:
        info = p.xpath('string(.)').replace('\r', '').replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
        if info:
            infobox.append(info)
    return '\n'.join(infobox)


def inspect_crawl():
    '''Crawl the examination-item pages and store the raw HTML.'''
    for page in range(1, 3685):
        try:
            url = 'http://jck.xywy.com/jc_%s.html' % page
            html = get_html(url)
            data = {}
            data['url'] = url
            data['html'] = html
            db['jc'].insert_one(data)
        except Exception as e:
            print(e)


if __name__ == '__main__':
    spider_main()
    inspect_crawl()
```
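Once the two crawl functions have finished (this takes a while given the page ranges), it is worth checking what actually landed in MongoDB. The sketch below assumes the same default local connection and the medical database with the data and jc collections written by the script above; the printed fields are just illustrative.

```python
# Sanity check on the crawl results, assuming the default local MongoDB and the
# 'medical' database written by the script above.
import pymongo

db = pymongo.MongoClient()['medical']
print('disease documents:', db['data'].count_documents({}))   # written by spider_main()
print('check-item pages :', db['jc'].count_documents({}))     # written by inspect_crawl()

# Peek at one parsed entry: its URL and the disease name extracted from the overview page.
sample = db['data'].find_one({}, {'url': 1, 'basic_info.name': 1})
print(sample)
```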
When the crawl has finished, the data can be exported with the following command:

```
mongoexport -d test -c medical -o medical.json
```

Here `-d test` specifies the database, `-c medical` specifies the collection to export, and `-o medical.json` specifies the output file name; a directory path can be prepended to it.
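Note that the crawler above writes to the `medical` database and the `data` collection (plus `jc` for the raw examination pages), so to dump exactly what was just crawled the command would be along the lines of `mongoexport -d medical -c data -o data.json`, with `-c jc` for the examination HTML.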