清华大佬耗费三个月吐血整理的几百G的资源,免费分享!
# -*- coding: utf-8 -*-
import urllib.request
import re


def yunpan_search(out_path='/root/Desktop/zhihu.txt'):
    """Scrape answer bodies from Zhihu's explore page and save them to a file.

    Fetches https://www.zhihu.com/explore, extracts each answer's raw text
    (the content between the hidden ``<textarea>`` marker and the
    answer-date span), writes the answers to *out_path*, then rewrites the
    file with ASCII word characters and literal ``&;`` runs stripped out —
    leaving (presumably) only the CJK text.

    Args:
        out_path: Destination file for the scraped and cleaned text.
            Defaults to the original hard-coded path.
    """
    url = "https://www.zhihu.com/explore"
    req = urllib.request.Request(url, headers={
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
    })
    # Context manager guarantees the HTTP response is closed even on error.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('utf-8')

    # Everything between the hidden answer <textarea> and the answer-date
    # span; re.S lets '.' match across newlines (answers are multi-line).
    rex = '(?<=<textarea class="content hidden">\n).*?(?=<span class="answer-date-link-wrap">)'
    answers = re.findall(rex, html, re.S)
    with open(out_path, 'w') as f:
        for answer in answers:
            f.write(answer)
            f.write('\n\n')
    print("抓取成功!")

    # BUG FIX: the original used re.compile(r'\w*', re.L); since Python 3.6
    # the LOCALE flag with a str pattern raises
    # "ValueError: cannot use LOCALE flag with a str pattern", so the
    # cleanup phase always crashed.  re.ASCII reproduces the Python-2-era
    # intent: \w matches only ASCII letters/digits/underscore, so stripping
    # it leaves CJK characters intact.
    word_pat = re.compile(r'\w*', re.ASCII)
    amp_pat = re.compile(r"(&;)*")
    with open(out_path, 'r+') as f:
        cleaned = [amp_pat.sub('', word_pat.sub('', line)) for line in f]
        # Rewind and truncate so the cleaned text fully replaces the old.
        f.seek(0)
        f.truncate(0)
        f.writelines(cleaned)
    print("处理成功!")


if __name__ == '__main__':
    yunpan_search()