
Scraping Ziroom (自如) listing data with Python 3

Code first:

# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests
import csv
import re

url = "http://www.ziroom.com/z/nl/z3-d23008618-b18335746.html?p={page}"
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Connection': 'Keep-Alive',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}

# work out the total page count from the pager text on page 1
r = requests.get(url.format(page=1), headers=headers)
html = BeautifulSoup(r.text, "html.parser")
pages_str = html.select(".pages > span")[0].text
page_count = int(re.findall(r'\d+', pages_str)[0])

# start scraping
page = 0
send_url = set()   # keep track of the URLs we have requested
info = []
while page < page_count:
    page += 1
    spliced_url = url.format(page=page)
    print("start send:", spliced_url)
    send_url.add(spliced_url)
    response = requests.get(spliced_url, headers=headers)
    html = BeautifulSoup(response.text, "html.parser")
    house_list = html.select("ul > .clearfix")
    for house in house_list:
        house_name = house.select("h3")[0].text
        # collapse all whitespace in the price string
        house_price = "".join(house.select(".price")[0].text.split())
        # listing details: the second <p> under .detail
        house_detail = house.select(".detail > p")[1].get_text(strip=True)
        info.append([house_name, house_price, house_detail])

# write the collected rows out as CSV
with open("test.csv", "w", newline="", encoding="utf-8") as f:
    f_writer = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    f_writer.writerows(info)
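
For a quick sanity check, the CSV can be read back with the same dialect settings used when writing (delimiter ',' and quotechar '|'); this snippet assumes the script above has already produced test.csv in the working directory:

import csv

# read test.csv back with the same dialect it was written with
with open("test.csv", newline="", encoding="utf-8") as f:
    for name, price, detail in csv.reader(f, delimiter=',', quotechar='|'):
        print(name, price, detail, sep=" | ")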

This is just a small example; the main libraries involved are requests and BeautifulSoup. The only slightly tricky part of the scrape was finding a reliable end-of-results marker. I didn't look for one; instead I computed the total page count up front. Ziroom's pages are cleverly designed: even when there are only 2 pages, requesting a page number greater than 2 still returns data, and its structure is barely distinguishable from the real pages (this may also be down to my unfamiliarity with BeautifulSoup; it's my first time using it 😓). I don't think this handling is ideal; a sketch of an alternative stop condition follows below. Everything else is covered in detail by the official docs, so I won't repeat it here; practice is always the sole criterion for testing truth.
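
As an alternative to computing the page count up front, one option is to stop as soon as a page yields no listings we haven't already seen. This is only a sketch under assumptions: it reuses the url and headers defined in the script above, and the "h3 > a" selector for the per-listing link is a guess I haven't verified against Ziroom's current markup.

from bs4 import BeautifulSoup
import requests

seen = set()
page = 0
while True:
    page += 1
    response = requests.get(url.format(page=page), headers=headers)
    soup = BeautifulSoup(response.text, "html.parser")
    # hypothetical selector: one link per listing card
    links = {a.get("href") for a in soup.select("ul > .clearfix h3 > a")}
    fresh = links - seen
    if not fresh:
        # this page only repeated listings we already have: past the end
        break
    seen |= fresh
    # ...parse and collect the listings on this page, as in the main script...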