Douban Spider

Writing web crawlers in Python is a fairly common practice. This project draws on resources from GitHub to crawl books under different tags on Douban Books.

Encoding and Decoding

The first problem to solve is encoding and decoding; otherwise the crawled results come out as mojibake. For details, see: Encode and Decode.

        if isinstance(plain_text, str):
            plain_text = plain_text.encode('utf-8')   # str -> UTF-8 bytes
        else:
            plain_text = plain_text.decode().encode('utf-8')  # bytes -> str -> UTF-8 bytes
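
For intuition, here is a tiny self-contained round trip (the sample string is made up) showing the str/bytes normalization this snippet performs; note that decode() defaults to UTF-8, so the branch above assumes the page is UTF-8 encoded:

    raw = '豆瓣读书'.encode('utf-8')     # urlopen().read() returns bytes like these
    text = raw.decode('utf-8')          # bytes -> str (decode() defaults to UTF-8)
    assert text.encode('utf-8') == raw  # str -> UTF-8 bytes, ready for BeautifulSoup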

Avoiding Bans

Use the time.sleep function to mimic the pace of a human browsing pages:

       time.sleep(np.random.rand()*5)
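
Random delays pair naturally with rotating User-Agent headers, which the full source below also does. A minimal sketch of the combination; polite_fetch is a made-up helper name, and headers_pool is assumed to be a list of header dicts like hds in the source code:

    import time
    import urllib.request
    import numpy as np

    def polite_fetch(url, headers_pool):
        # Pause 0-5 seconds so requests arrive at a human-like pace
        time.sleep(np.random.rand() * 5)
        # Pick a random User-Agent from the pool for each request
        req = urllib.request.Request(url, headers=headers_pool[np.random.randint(len(headers_pool))])
        return urllib.request.urlopen(req).read()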

Splitting into Columns

Sort the different pieces of information into separate columns so they are easy to filter:

ws[i].append(['序号', '书名', '评分', '评价人数', '作者', '出版信息'])  # No., Title, Rating, Rating count, Author, Publication info
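
For reference, the surrounding export flow is plain openpyxl; a minimal self-contained sketch (the sheet title and row values are made-up placeholders):

    from openpyxl import Workbook

    wb = Workbook(write_only=True)      # streaming mode: rows can only be appended
    ws = wb.create_sheet(title='编程')   # one sheet per book tag
    ws.append(['序号', '书名', '评分', '评价人数', '作者', '出版信息'])
    ws.append([1, 'Sample Title', 9.0, 1234, 'Sample Author', 'Sample Press / 2020'])
    wb.save('book_list-编程.xlsx')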

A screenshot of the final exported XLSX file:

Douban List Example

Source Code

#-*- coding: UTF-8 -*-

import time
import urllib.error
import urllib.parse
import urllib.request
import numpy as np
from bs4 import BeautifulSoup
from openpyxl import Workbook


# A small pool of User-Agent strings, rotated between requests
hds=[{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\
{'User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},\
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}]


def book_spider(book_tag):
    page_num = 0
    book_list = []
    try_times = 0

    while True:
    #for page_num in range(2): # For Test
        url = 'http://www.douban.com/tag/' + urllib.parse.quote(book_tag) + '/book?start=' + str(page_num*15)
        time.sleep(np.random.rand()*5)  # random 0-5 s pause to mimic human browsing
        
        # Fetch the page with a User-Agent header rotated by page number
        try:
            req = urllib.request.Request(url, headers=hds[page_num%len(hds)])
            source_code = urllib.request.urlopen(req).read()
            plain_text = source_code
        except (urllib.error.HTTPError, urllib.error.URLError) as e:
            print(e)  # URLError has no .code attribute, so print the error itself
            continue
        
        if isinstance(plain_text, str):
            plain_text = plain_text.encode('utf-8')   # str -> UTF-8 bytes
        else:
            plain_text = plain_text.decode().encode('utf-8')  # bytes -> str -> UTF-8 bytes
  
        ## Previous version used requests directly; the IP got banned easily:
        #source_code = requests.get(url) 
        #plain_text = source_code.text  
        
        soup = BeautifulSoup(plain_text, 'lxml')
        list_soup = soup.find('div', {'class': 'mod book-list'})
        
        try_times += 1
        if list_soup is None and try_times < 200:
            continue
        elif list_soup is None or len(list_soup) <= 1:
            break # Break when no information is found after 200 requests
        
        for book_info in list_soup.findAll('dd'):
            title = book_info.find('a', {'class':'title'}).string.strip()
            desc = book_info.find('div', {'class':'desc'}).string.strip()
            desc_list = desc.split('/')
            book_url = book_info.find('a', {'class':'title'}).get('href')
            
            try:
                author_info = '/'.join(desc_list[0:-3])
            except Exception:
                author_info = '暂无'
            try:
                pub_info = '/'.join(desc_list[-3:])
            except Exception:
                pub_info = '暂无'
            try:
                rating = book_info.find('span', {'class':'rating_nums'}).string.strip()
            except Exception:
                rating = '0.0'
            try:
                #people_num = book_info.findAll('span')[2].string.strip()
                people_num = get_people_num(book_url)
                #people_num = people_num.strip('人评价')
            except Exception:
                people_num = '0'

            book_list.append([title, rating, people_num, author_info, pub_info])
            try_times = 0  # reset once valid information is found
        page_num+=1
        print('Downloading Information From Page %d' % page_num)
    return book_list


def get_people_num(url):
    try:
        req = urllib.request.Request(url, headers=hds[np.random.randint(0, len(hds))])
        source_code = urllib.request.urlopen(req).read()
        plain_text = source_code
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(e)
        return '0'  # bail out here, otherwise plain_text below would be unbound

    if isinstance(plain_text, str):
        plain_text = plain_text.encode('utf-8')
    else:
        plain_text = plain_text.decode().encode('utf-8')

    soup = BeautifulSoup(plain_text, 'lxml')
    #people_num = soup.find('div', {'class':'rating_sum'}).findAll('span')[1].string.strip()
    people_num = soup.find('span', {'property':'v:votes'}).string.strip()
    return people_num


def do_spider(book_tag_lists):
    book_lists = []
    for book_tag in book_tag_lists:
        book_list = book_spider(book_tag)
        # Sort numerically by rating; a string sort would rank '9.5' above '10.0'
        book_list = sorted(book_list, key=lambda x: float(x[1]), reverse=True)
        book_lists.append(book_list)
    return book_lists


def print_book_lists_excel(my_book_lists, my_book_tag_lists):
    wb = Workbook(write_only=True)
    ws = []
    for i in range(len(my_book_tag_lists)):
        ws.append(wb.create_sheet(title=my_book_tag_lists[i]))  # one sheet per tag
    for i in range(len(my_book_tag_lists)):
        ws[i].append(['序号', '书名', '评分', '评价人数', '作者', '出版信息'])
        count = 1
        for bl in my_book_lists[i]:
            ws[i].append([count, bl[0], float(bl[1]), int(bl[2]), bl[3], bl[4]])
            count += 1
    save_path = 'book_list'
    for i in range(len(my_book_tag_lists)):
        save_path += ('-' + my_book_tag_lists[i])
    save_path += '.xlsx'
    wb.save(save_path)


if __name__=='__main__':
    tic = time.perf_counter()
    #### book_tag_lists = ['心理','判断与决策','算法','数据结构','经济','历史']
    #book_tag_lists = ['创业','理财','社会学','佛教']
    #### book_tag_lists = ['思想','科技','科学','web','股票','爱情','两性']
    #### book_tag_lists = ['计算机','机器学习','linux','数据库','互联网'] 
    #### book_tag_lists = ['数学']
    #book_tag_lists = ['摄影','设计','音乐','旅行','教育','成长','情感','育儿','健康','养生']
    #### book_tag_lists = ['商业','理财','管理']  
    #### book_tag_lists = ['名著']
    #### book_tag_lists = ['科普','经典','生活','心灵','文学']
    book_tag_lists = ['传记','哲学','编程','科幻','思维','金融']
    #book_tag_lists = ['个人管理','时间管理','投资','文化','宗教']
    #book_tag_lists = ['个人管理'] # for test
    book_lists = do_spider(book_tag_lists)
    print_book_lists_excel(book_lists, book_tag_lists)
    toc = time.perf_counter()
    print(f"The code runs for {toc - tic:0.4f} seconds")

好书一下 (Good Books at a Click)

A simple web-app interface makes it convenient to dig up, search for, and read good books:

好书一下 — take in the essence of good books.
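
The web app itself is not part of this post; purely as an illustration, here is a minimal Flask sketch (Flask, the route, and the file name are assumptions, not the actual 好书一下 implementation) that serves an exported sheet as JSON for searching and filtering:

    # Hypothetical sketch only, not the actual 好书一下 app.
    from flask import Flask, jsonify
    from openpyxl import load_workbook

    app = Flask(__name__)

    @app.route('/books/<tag>')
    def books(tag):
        # File name assumed; one sheet per tag, as produced by the spider above
        wb = load_workbook('book_list-编程.xlsx', read_only=True)
        rows = list(wb[tag].iter_rows(values_only=True))
        header, body = rows[0], rows[1:]
        return jsonify([dict(zip(header, row)) for row in body])

    if __name__ == '__main__':
        app.run()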
