Basic modules
A Python crawler (web spider) fetches pages from a website and then parses the useful data out of them.
The basic approach uses the urllib, urllib2, and re modules.
Basic usage, with examples:
(1) Perform a basic GET request and fetch the page HTML
#!coding=utf-8
import urllib
import urllib2

url = 'http://www.baidu.com/'
# build the request
request = urllib2.Request(url)
try:
    # send the request and get the response
    response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
    if hasattr(e, 'reason'):
        print e.reason
else:
    # read the response body
    html = response.read()
    # read the response headers
    headers = response.info()
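Note that the try/except above only traps urllib2.HTTPError. A minimal variation (not part of the original example) that also catches urllib2.URLError, so DNS failures and timeouts are reported too, could look like this:

import urllib2

url = 'http://www.baidu.com/'
try:
    # the timeout argument (in seconds) is available since Python 2.6
    response = urllib2.urlopen(url, timeout=10)
except urllib2.HTTPError, e:
    # the server answered, but with an error status code
    print 'HTTP error:', e.code
except urllib2.URLError, e:
    # DNS failure, refused connection, timeout, ...
    print 'failed to reach the server:', e.reason
else:
    html = response.read()
    headers = response.info()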
(2) Submitting a form (POST request)
#!coding=utf-8
import urllib2
import urllib

post_url = ''  # URL of the form action
post_data = urllib.urlencode({
    'username': 'username',
    'password': 'password',
})
post_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0',
}
request = urllib2.Request(
    url=post_url,
    data=post_data,
    headers=post_headers,
)
response = urllib2.urlopen(request)
html = response.read()
(3) Fetching a Baidu Tieba thread and extracting the posts with a regular expression
#!coding=utf-8
import urllib2
import re
import sys

# Python 2 hack so that utf-8 text can be written to the file directly
reload(sys)
sys.setdefaultencoding('utf-8')

page_num = 1
url = 'http://tieba.baidu.com/p/3238280985?see_lz=1&pn=' + str(page_num)
mypage = urllib2.urlopen(url).read().decode('gbk')
# match the post-content <div> blocks; adjust the pattern to the current page markup
myre = re.compile(r'<div id="post_content_\d+"[^>]*>(.*?)</div>', re.DOTALL)
items = myre.findall(mypage)

f = open('baidu.txt', 'a+')
i = 0
for item in items:
    i += 1
    print i
    # strip tags and whitespace from the post body
    text = item.replace('<br>', '').replace('\n', '').replace(' ', '') + '\n'
    print text
    f.write(text)
f.close()
(4) Simulating a login to 163 webmail and downloading message bodies
#coding:utf-8
'''
Simulate a login to 163 webmail and download the message bodies.
'''
import urllib
import urllib2
import cookielib
import re
import time
import json


class Email163:
    header = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
    user = ''
    cookie = None
    sid = None
    mailbaseurl = 'http://twebmail.mail.163.com'

    def __init__(self):
        # install a global opener that keeps the session cookies
        self.cookie = cookielib.CookieJar()
        cookiepro = urllib2.HTTPCookieProcessor(self.cookie)
        urllib2.install_opener(urllib2.build_opener(cookiepro))

    def login(self, user, pwd):
        '''
        Log in.
        '''
        postdata = urllib.urlencode({
            'username': user,
            'password': pwd,
            'type': 1
        })
        # note: the login url (and its query string) differs between webmail versions
        req = urllib2.Request(
            url='https://ssl.mail.163.com/entry/coremail/fcg/ntesdoor2',
            data=postdata,
            headers=self.header,
        )
        res = str(urllib2.urlopen(req).read())
        #print res
        # pull the session id (sid) out of the login response;
        # the exact pattern depends on the webmail version
        patt = re.compile(r'sid=([^"&]+)', re.I)
        patt = patt.search(res)
        uname = user.split('@')[0]
        self.user = user
        if patt:
            self.sid = patt.group(1).strip()
            #print self.sid
            print '%s login successful.....' % (uname)
        else:
            print '%s login failed....' % (uname)

    def getinbox(self):
        '''
        Fetch the inbox mail list.
        '''
        print '\nget mail lists.....\n'
        sid = self.sid
        url = self.mailbaseurl + '/jy3/list/list.do?sid=' + sid + '&fid=1'
        res = urllib2.urlopen(url).read()
        # extract (url, from, subject) for every mail in the list;
        # the tag names and classes have to match the current list-page markup
        maillist = []
        patt = re.compile(r'<div[^>]+>.*?href="([^"]+)"[^>]+>(.*?)</a>.*?<div[^>]+>.*?href="[^>]+>(.*?)</a>',
                          re.I | re.S)
        patt = patt.findall(res)
        if not patt:
            return maillist
        for i in patt:
            line = {
                'from': i[1].decode('utf8'),
                'url': self.mailbaseurl + i[0],
                'subject': i[2].decode('utf8')
            }
            maillist.append(line)
        return maillist

    def getmailmsg(self, url):
        '''
        Download the body of one mail.
        '''
        content = ''
        print '\n download.....%s\n' % (url)
        res = urllib2.urlopen(url).read()
        patt = re.compile(r'contenturl:"([^"]+)"', re.I)
        patt = patt.search(res)
        if patt is None:
            return content
        url = '%s%s' % (self.mailbaseurl, patt.group(1))
        time.sleep(1)
        res = urllib2.urlopen(url).read()
        djson = json.JSONDecoder(encoding='utf8')
        jsonres = djson.decode(res)
        if 'resultvar' in jsonres:
            content = jsonres['resultvar']
        time.sleep(3)
        return content

'''
demo
'''
# initialize
mail163 = Email163()
# log in (use your own account and password)
mail163.login('username@163.com', 'password')
time.sleep(2)
# fetch the inbox list
elist = mail163.getinbox()
# download the body of every mail
for i in elist:
    print 'Subject: %s  From: %s  Body:\n%s' % (
        i['subject'].encode('utf8'), i['from'].encode('utf8'),
        mail163.getmailmsg(i['url']).encode('utf8'))
(5) Pages that require a login
#1 Handling cookies
import urllib2, cookielib

cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
content = urllib2.urlopen('http://XXXX').read()

#2 Using a proxy together with cookies
proxy_support = urllib2.ProxyHandler({'http': 'http://XX.XX.XX.XX:XXXX'})
opener = urllib2.build_opener(proxy_support, cookie_support, urllib2.HTTPHandler)

#3 Handling forms
import urllib
postdata = urllib.urlencode({
    'username': 'XXXXX',
    'password': 'XXXXX',
    'continueURI': 'http://www.verycd.com/',
    'fk': fk,  # fk is a token that has to be scraped from the login page beforehand
    'login_submit': '登录'
})
req = urllib2.Request(
    url='http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data=postdata
)
result = urllib2.urlopen(req).read()

#4 Pretending to be a browser
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
req = urllib2.Request(
    url='http://secure.verycd.com/signin/*/http://www.verycd.com/',
    data=postdata,
    headers=headers
)

#5 Beating "anti-hotlinking"
headers = {
    'Referer': 'http://www.cnbeta.com/articles'
}
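The headers dict by itself does nothing until it is attached to a request; a tiny sketch of sending the faked Referer (the image URL below is only a placeholder):

req = urllib2.Request(
    url='http://www.cnbeta.com/some-image.jpg',  # placeholder resource that checks the Referer
    headers=headers
)
data = urllib2.urlopen(req).read()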
(6) Multithreading
from threading import Thread
from Queue import Queue
from time import sleep

# q is the task queue
# num is the number of concurrent worker threads
# jobs is the number of tasks
q = Queue()
num = 2
jobs = 10

# the handler that processes a single task
def do_something_using(arguments):
    print arguments

# worker: keeps pulling tasks from the queue and processing them
def working():
    while True:
        arguments = q.get()
        do_something_using(arguments)
        sleep(1)
        q.task_done()

# start num worker threads waiting on the queue
for i in range(num):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()

# put the jobs into the queue
for i in range(jobs):
    q.put(i)

# wait until all jobs are done
q.join()
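In the skeleton above the worker only prints its task. A rough sketch of plugging the earlier urllib2 code into the same worker/queue pattern to download several Tieba pages concurrently (the URL list and thread count are just illustrative):

#!coding=utf-8
from threading import Thread
from Queue import Queue
import urllib2

q = Queue()
num = 4  # number of download threads

def fetch(url):
    # download one page and report its size
    html = urllib2.urlopen(url, timeout=10).read()
    print url, len(html)

def working():
    while True:
        url = q.get()
        try:
            fetch(url)
        except Exception, e:
            print url, 'failed:', e
        q.task_done()

for i in range(num):
    t = Thread(target=working)
    t.setDaemon(True)
    t.start()

# the URL list here is only an example
for page in range(1, 11):
    q.put('http://tieba.baidu.com/p/3238280985?see_lz=1&pn=%d' % page)

q.join()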
The Scrapy framework
Scrapy is a fast, high-level screen-scraping and web-crawling framework written in Python. It crawls web sites and extracts structured data from their pages, and it is widely used for data mining, monitoring, and automated testing.
I have only just started learning the framework, so I will hold back on judging it. My first impression is that it feels a bit like Java: it needs quite a lot of supporting modules.
(一) Creating a Scrapy project
# create the project with: scrapy startproject scrapy_test
├── scrapy_test
│ ├── scrapy.cfg
│ └── scrapy_test
│ ├── __init__.py
│ ├── items.py
│ ├── pipelines.py
│ ├── settings.py
│ └── spiders
│ ├── __init__.py
# the command above generates the project layout shown
(二) Project layout
scrapy.cfg: the project configuration file
items.py: defines the data structures (items) to be extracted
pipelines.py: pipeline definitions, used to post-process the extracted items, e.g. to save them (see the sketch after this list)
settings.py: crawler settings
spiders: the directory that holds the spiders
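For illustration, a minimal pipeline might simply append every item to a JSON-lines file (the class name JsonWriterPipeline and the output file name are made up for this sketch):

# pipelines.py
import json

class JsonWriterPipeline(object):
    def __init__(self):
        self.f = open('items.jl', 'wb')

    def process_item(self, item, spider):
        # called for every item the spider returns; the item must be
        # returned so that any later pipelines can keep processing it
        self.f.write(json.dumps(dict(item)) + '\n')
        return item

It is enabled by adding its class path to ITEM_PIPELINES in settings.py (a plain list in older Scrapy releases, a dict with priorities in newer ones).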
(三) Dependencies
The dependencies are the annoying part.
# install the python-dev package
apt-get install python-dev
# twisted, w3lib, six, queuelib, cssselect, libxslt
pip install w3lib
pip install twisted
pip install lxml
apt-get install libxml2-dev libxslt-dev
apt-get install python-lxml
pip install cssselect
pip install pyopenssl
sudo pip install service_identity
# once everything is installed, a project can be created with: scrapy startproject test
(四) A crawling example
(1) Create the Scrapy project
dizzy@dizzy-pc:~/python/spit$ scrapy startproject itzhaopin
New Scrapy project 'itzhaopin' created in:
/home/dizzy/python/spit/itzhaopin
You can start your first spider with:
cd itzhaopin
scrapy genspider example example.com
dizzy@dizzy-pc:~/python/spit$
dizzy@dizzy-pc:~/python/spit$ cd itzhaopin
dizzy@dizzy-pc:~/python/spit/itzhaopin$ tree
.
├── itzhaopin
│ ├── __init__.py
│ ├── items.py
│ ├── pipelines.py
│ ├── settings.py
│ └── spiders
│ └── __init__.py
└── scrapy.cfg
# scrapy.cfg: the project configuration file
# items.py: defines the data structures (items) to be extracted
# pipelines.py: pipeline definitions, used to post-process the extracted items, e.g. to save them
# settings.py: crawler settings
# spiders: the directory that holds the spiders
(2) Define the data structure to extract in items.py
from scrapy.item import Item, Field

# the data we want to extract
class TencentItem(Item):
    name = Field()           # job title
    catalog = Field()        # job category
    worklocation = Field()   # work location
    recruitnumber = Field()  # number of openings
    detaillink = Field()     # link to the job detail page
    publishtime = Field()    # publish date
(3) Implement the spider class
A spider is a Python class that inherits from scrapy.contrib.spiders.CrawlSpider (the example below uses the simpler scrapy.spider.BaseSpider) and has three members that must be defined:
name: the name that identifies the spider.
start_urls: the list of URLs the spider starts crawling from.
parse(): the method called to parse the page content once each page in start_urls has been downloaded; it returns either the next requests to crawl or a list of items.
Create a new spider in the spiders directory, tencent_spider.py:
#coding=utf-8
from scrapy.spider import BaseSpider

class DmozSpider(BaseSpider):
    name = 'dmoz'
    allowed_domains = ['dmoz.org']
    start_urls = [
        'http://www.dmoz.org/Computers/Programming/Languages/Python/Books/',
        'http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/'
    ]

    def parse(self, response):
        filename = response.url.split('/')[-2]
        open(filename, 'wb').write(response.body)

This one is as simple as it gets; run the spider with: scrapy crawl dmoz
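The Dmoz spider above only saves raw pages. To tie things back to the TencentItem defined in items.py, parse() would typically populate items and optionally yield follow-up requests. A rough sketch in the same old-style Scrapy API, where the start URL and every XPath expression are placeholders that must be adapted to the real job-list markup:

#coding=utf-8
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from itzhaopin.items import TencentItem

class TencentSpider(BaseSpider):
    name = 'tencent'
    allowed_domains = ['hr.tencent.com']
    start_urls = ['http://hr.tencent.com/position.php']  # placeholder start url

    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        # every XPath below is a placeholder; inspect the real page to fill them in
        for row in hxs.select('//table//tr[position()>1]'):
            item = TencentItem()
            item['name'] = row.select('./td[1]/a/text()').extract()
            item['detaillink'] = row.select('./td[1]/a/@href').extract()
            item['catalog'] = row.select('./td[2]/text()').extract()
            item['recruitnumber'] = row.select('./td[3]/text()').extract()
            item['worklocation'] = row.select('./td[4]/text()').extract()
            item['publishtime'] = row.select('./td[5]/text()').extract()
            yield item
        # follow the "next page" link, if any (placeholder XPath)
        next_page = hxs.select('//a[@id="next"]/@href').extract()
        if next_page:
            yield Request('http://hr.tencent.com/' + next_page[0], callback=self.parse)

It would be run the same way, with scrapy crawl tencent.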