# 之前写了一个抓取BBS论坛所有帖子标题名的爬虫,不过该论坛已经把我封了,还是自己太年轻经验少,没有设置sleep time
# -*- coding=utf-8 -*-
import re
import time
import urllib
import urllib.request

from bs4 import BeautifulSoup
#获取板块的链接
def get_Bankuan_link(url):
    """Collect the relative links of every valid forum board under *url*.

    A board is considered valid only when its page contains an
    ``<input name="custompage">`` element (i.e. it has a pager).

    :param url: base URL of the BBS, e.g. ``'http://bbs.cupl.edu.cn/'``
    :return: list of relative board links (strings)
    """
    link_list = []  # collected board links
    html = urllib.request.urlopen(url)
    bsObj = BeautifulSoup(html, "lxml")
    # find_all('dt') returns a list of <dt> nodes; each is expected to wrap
    # an <a href="..."> pointing at a board page.
    for node in bsObj.find_all('dt'):
        try:
            node_link = node.contents[0]['href']
            node_html = urllib.request.urlopen(url + node_link)
            node_bsObj = BeautifulSoup(node_html, "lxml")
            # Boards without a custompage input are skipped (no pager).
            judge = node_bsObj.find('input', {'name': 'custompage'})
            if judge is not None:
                link_list.append(node_link)
        except (KeyError, IndexError, TypeError, OSError, ValueError):
            # <dt> without a usable <a href>, or the board page is
            # unreachable / the URL is malformed — skip this node.
            continue
    return link_list
#获取板块名
def get_Bankuan_name(url):
    """Collect the display names of every valid forum board under *url*.

    Uses the same validity criterion as :func:`get_Bankuan_link` (the board
    page must contain an ``<input name="custompage">``), so the two result
    lists stay index-aligned.

    :param url: base URL of the BBS
    :return: list of board names (strings; may contain ``None`` if the
             anchor has no text — TODO confirm against the live site)
    """
    name_list = []  # collected board names
    html = urllib.request.urlopen(url)
    bsObj = BeautifulSoup(html, "lxml")
    for node in bsObj.find_all('dt'):
        try:
            node_link = node.contents[0]['href']
            node_html = urllib.request.urlopen(url + node_link)
            node_bsObj = BeautifulSoup(node_html, "lxml")
            judge = node_bsObj.find('input', {'name': 'custompage'})
            if judge is not None:
                name_list.append(node.contents[0].string)
        except (KeyError, IndexError, TypeError, OSError, ValueError):
            # malformed <dt> node or unreachable board page — skip it
            continue
    return name_list
#获取板块最大页数
def get_page_max_list(url, error_count=1):
    """Return the maximum page count of each valid board under *url*.

    The count is scraped from the sibling text of the board page's
    ``<input name="custompage">`` element. Boards whose page count cannot
    be parsed are reported on stdout (and are MISSING from the result, so
    the returned list may be shorter than the board-name/link lists).

    :param url: base URL of the BBS
    :param error_count: starting index for the printed error numbering
    :return: list of page-count strings
    """
    page_max_list = []
    Bankuan_link_list = get_Bankuan_link(url)
    Bankuan_name_list = get_Bankuan_name(url)
    for x in Bankuan_link_list:
        html = urllib.request.urlopen(url + x)
        bsObj = BeautifulSoup(html, "lxml")
        error = Bankuan_link_list.index(x)
        error_Bankuan_name = Bankuan_name_list[error]
        try:
            # The pager text looks like e.g. ' / 123 页'; slice out the number.
            num = bsObj.find('input', {'name': 'custompage'}).next_sibling.string[3:-2]
            page_max_list.append(num)
        except (AttributeError, TypeError, IndexError):
            # find() returned None, or the sibling text is missing/odd-shaped.
            # BUGFIX: the reported address previously hard-coded
            # 'http://bbs.csu.edu.cn/bbs/' regardless of the actual base url.
            print("Error%d: " % error_count, "%s 版块的get_page_max_list无法解决该板块,需要自己动手查找该页面页数" % error_Bankuan_name, '\n',
                  "该版块的网址是: %s%s " % (url, x), '\n'
                  "在版块名、版块链接列表中的第%d位置" % error)
            print("=" * 100)
            error_count = error_count + 1
    return page_max_list
#base_url:BBS的链接
#bankuan_url:BBS内某一板块的url
def get_BBS_all_article_title(file_name, pages_count, bankuan_url, base_url='http://bbs.csu.edu.cn/bbs/', title_count=1):
    """Scrape every article title of one board and append them to a text file.

    Iterates pages 1..pages_count of the board at ``base_url + bankuan_url``,
    extracts all ``<a class="s xst">`` titles and writes one per line.

    :param file_name: output file stem (``.txt`` is appended)
    :param pages_count: number of pages to fetch (str or int)
    :param bankuan_url: board's relative URL
    :param base_url: BBS base URL
    :param title_count: starting value of the printed running title counter
    """
    file_name = file_name + '.txt'
    # `with` guarantees the file handle is closed even on an exception
    # (the original leaked it).
    with open(r'E:/Python/Spider_Net/未爬取的BBS/%s' % file_name, 'a') as f:
        html = urllib.request.urlopen(base_url + bankuan_url)
        print('正在写入%s文件' % file_name)
        for x in range(int(pages_count)):
            page_num = x + 1
            URL = base_url + bankuan_url + '&page=%d' % page_num
            # Polite crawl delay — hammering every page back-to-back is what
            # got the author banned (see the note at the top of the file).
            time.sleep(1)
            html = urllib.request.urlopen(URL)
            try:
                html = html.read().decode('gbk')
            except UnicodeDecodeError:
                # page is not valid GBK — skip it rather than crash
                continue
            bsObj = BeautifulSoup(html, "lxml")
            JieDian = bsObj.find_all("a", {"class": "s xst"})
            for y in JieDian:
                print(' ' * 15, '正在写入 %s文件' % file_name, ' 已经写了%d条帖子标题' % title_count, y.string)
                try:
                    f.write(y.string + '\n')
                    title_count = title_count + 1
                except TypeError:
                    # y.string is None when the anchor has nested markup
                    continue
##########################################################################################
# Driver: initialize the BBS base URL, gather the board names / page counts /
# links, then scrape every board's article titles.
base_url = 'http://bbs.cupl.edu.cn/'
File_name = get_Bankuan_name(base_url)
Pages_count = get_page_max_list(base_url)
Bankuan_link = get_Bankuan_link(base_url)
print(File_name)
print(Pages_count)
print(Bankuan_link)
# zip() iterates the three lists in lockstep; unlike range(len(File_name))
# it cannot raise IndexError when get_page_max_list returned fewer entries
# (which its own error path allows).
for file_name, pages_count, bankuan_link in zip(File_name, Pages_count, Bankuan_link):
    get_BBS_all_article_title(file_name, pages_count, bankuan_link, base_url)
# 运行起来超级爽,抓取的数据大约4M多一点点吧。