-
Notifications
You must be signed in to change notification settings - Fork 15
/
Copy pathparse_doc.py
146 lines (103 loc) · 4.23 KB
/
parse_doc.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import requests
import json
import re
import os
# Shared HTTP session so cookies/keep-alive are reused across all requests.
session = requests.session()

# Output directory for downloaded documents (hard-coded Windows path).
path = "F:\\桌面\\Files"
# os.makedirs with exist_ok=True is safer than the original
# `if not exists: os.mkdir(...)`: it also creates missing parent
# directories and avoids the race between the existence check and the
# creation call.
os.makedirs(path, exist_ok=True)
def parse_txt1(code, doc_id):
    """Download the plain text of a document via the getdocinfo API.

    Used for pages that do not embed an md5sum directly: first queries the
    doc-info endpoint (decoded with `code`) for md5sum/rsign/page count,
    then fetches the text pages and concatenates every paragraph.
    """
    info_url = 'https://wenku.baidu.com/api/doc/getdocinfo?callback=cb&doc_id=' + doc_id
    info = session.get(info_url).content.decode(code)
    # NOTE(review): md5sum is concatenated straight after "&type=txt" with
    # no parameter name — the captured field presumably already carries its
    # own "&md5sum=..." query prefix; verify against a live response.
    md5sum = re.findall('"md5sum":"(.*?)",', info)[0]
    rsign = re.findall('"rsign":"(.*?)"', info)[0]
    pn = re.findall('"totalPageNum":"(.*?)"', info)[0]
    text_url = ('https://wkretype.bdimg.com/retype/text/' + doc_id
                + '?rn=' + pn + '&type=txt' + md5sum + '&rsign=' + rsign)
    pages = json.loads(session.get(text_url).content.decode('gbk'))
    # Flatten every paragraph of every page into one string.
    return ''.join(parag['c'] for page in pages for parag in page['parags'])
def parse_txt2(content, code, doc_id):
    """Download plain text for a page that already embeds md5sum/rsign.

    `content` is the already-fetched document page HTML. `code` is kept
    for signature parity with parse_txt1 but is not used here: the text
    endpoint response is decoded as UTF-8 regardless.
    """
    md5sum = re.findall('"md5sum":"(.*?)",', content)[0]
    rsign = re.findall('"rsign":"(.*?)"', content)[0]
    pn = re.findall('"show_page":"(.*?)"', content)[0]
    # NOTE(review): as in parse_txt1, md5sum presumably already starts with
    # "&md5sum=" — hence the bare concatenation after "&type=txt".
    text_url = ('https://wkretype.bdimg.com/retype/text/' + doc_id
                + '?rn=' + pn + '&type=txt' + md5sum + '&rsign=' + rsign)
    pages = json.loads(session.get(text_url).content.decode('utf-8'))
    return ''.join(parag['c'] for page in pages for parag in page['parags'])
def parse_doc(content):
    """Extract the text of a doc-type document from its embedded JSON pages.

    Scrapes the "...0.json..." page URLs out of the document HTML, fetches
    each page, and reassembles the text fragments. A newline is inserted
    whenever the "y" coordinate changes between fragments, i.e. the
    renderer moved to a new visual line.
    """
    url_list = re.findall(r'(https.*?0.json.*?)\\x22}', content)
    # The URLs are escaped as "\\\/" inside the embedded JS; unescape them.
    url_list = [addr.replace("\\\\\\/", "/") for addr in url_list]
    result = ""
    for url in set(url_list):
        page = session.get(url).content.decode('gbk')
        fragments = re.findall(r'"c":"(.*?)".*?"y":(.*?),', page)
        # Bug fix: the original sentinel y=0 (an int) never equaled the
        # string y-values, so the very first fragment of every page took
        # the "changed" branch and emitted a spurious leading newline.
        # None marks "no previous fragment yet" instead.
        prev_y = None
        for text, y_pos in fragments:
            if prev_y is not None and y_pos != prev_y:
                result += '\n'
            prev_y = y_pos
            # NOTE(review): the utf-8 -> unicode_escape round-trip decodes
            # \uXXXX sequences but can mangle literal non-ASCII text; kept
            # as-is pending verification against live responses.
            result += text.encode('utf-8').decode('unicode_escape', 'ignore')
    return result
def save_file(title, filename, content):
    """Write `content` to `filename` as UTF-8 text and print a success note.

    Parameters:
        title: human-readable document title, used only in the message.
        filename: destination file path.
        content: full text to write.
    """
    # The with-statement closes the file on exit; the original's explicit
    # f.close() inside the block was redundant and has been removed.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(content)
    print("文件" + title + "保存成功")
def main():
    """Interactive loop: ask for a format and a Wenku URL, download, save.

    Only 'txt' and 'doc' are implemented; entering "0" exits. Any error in
    one iteration is printed and the loop continues.
    """
    print("欢迎来到百度文库文件下载:")
    print("-----------------------\r\n")
    while True:
        try:
            # NOTE(review): menu lists "ppt" twice (items 3 and 5) — the
            # string is kept verbatim; the fifth entry may have been meant
            # to be a different format.
            print("1.doc \n 2.txt \n 3.ppt \n 4.xls\n 5.ppt\n")
            types = input("请输入需要下载文件的格式(0退出):")
            if types == "0":
                break
            if types not in ['txt', 'doc']:
                print("抱歉功能尚未开发")
                continue
            url = input("请输入下载的文库URL地址:")
            # Fetch the document page and decode it with its declared
            # charset; anything that is not utf-8 is treated as gbk.
            response = session.get(url)
            charset = re.findall('charset=(.*?)"', response.text)[0]
            if charset.lower() != 'utf-8':
                charset = 'gbk'
            content = response.content.decode(charset)
            # Document id is the path segment between "view/" and ".html".
            doc_id = re.findall('view/(.*?).html', url)[0]
            if types == 'txt':
                # Pages that embed an md5sum use the direct text endpoint;
                # otherwise fall back to the getdocinfo flow.
                if re.findall('"md5sum":"(.*?)",', content):
                    text = parse_txt2(content, charset, doc_id)
                    title = re.findall(r'<title>(.*?). ', content)[0]
                else:
                    text = parse_txt1(charset, doc_id)
                    title = re.findall(r"title.*?:.*?'(.*?)'", content)[0]
                save_file(title, path + "\\" + title + ".txt", text)
            elif types == 'doc':
                title = re.findall(r"title.*?:.*?'(.*?)'", content)[0]
                text = parse_doc(content)
                save_file(title, path + "\\" + title + ".doc", text)
        except Exception as e:
            # Best-effort loop: report the error and keep prompting.
            print(e)
# Entry point: run the interactive downloader only when executed directly,
# not when this module is imported.
if __name__=='__main__':
    main()