Downloading Lanrentuku (懒人图库) JavaScript effects with Python

This is a simple Python script that downloads JavaScript effect templates from Lanrentuku. It uses the third-party library gevent, which you need to install before running the script.
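The script also imports BeautifulSoup from bs4, so both dependencies have to be present. Assuming pip is available, a typical setup would be something like:

pip install gevent beautifulsoup4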

#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import os
import sys
import re
import gevent
from gevent import monkey
from bs4 import BeautifulSoup
monkey.patch_socket()
'''
description: Python crawler that downloads the JS template archives from Lanrentuku
author: admin
create-date: 2015-05-25
version: 1.0
'''
http_url = 'http://www.lanrentuku.com%s'
download_url = http_url[:-2] + '/js/d%szip'
reg = r'\d{1,}\.+'

def encode(text):
    return text.encode('utf8')

def createdirectory(curpath):
    # Create the output directory (u'js代码模板', "js code templates") if it does not exist yet
    mypath = os.path.join(getsubdirectory(), u'js代码模板')
    if not os.path.exists(mypath):
        os.mkdir(mypath)
    return os.path.join(mypath, curpath)

def getsubdirectory():
    return os.getcwd()

def schedule(a, b, c):
    # Progress callback for urllib.urlretrieve:
    # a = blocks transferred so far, b = block size, c = total file size
    per = 100.0 * a * b / c
    if per > 100:
        per = 100
    sys.stdout.write('%.1f%%\r' % per)
    sys.stdout.flush()

def geturllist(url):
    url_list = {}
    html = urllib.urlopen(url)
    content = html.read()
    html.close()
    # Parse the listing page with BeautifulSoup
    decodehtml = BeautifulSoup(content)
    try:
        atags = decodehtml.find_all('p', {'class': 'list-pngjs'})[0].find_all('a')
    except IndexError, e:
        print e
        atags = None
    # Collect each template's link and title
    if atags is not None:
        for a_tag in atags:
            url_list[http_url % a_tag.get('href')] = a_tag.get_text()
    return url_list

def download(down_url):
    # down_url is a (detail-page URL, title) pair; the numeric id plus the
    # trailing dot matched by reg turns download_url into e.g. .../js/d1234.zip
    try:
        m = re.search(reg, down_url[0])
        name = download_url % m.group(0)
        urllib.urlretrieve(name, createdirectory(down_url[1] + name[-4:]), schedule)
    except Exception, e:
        print e

def getpageurl(xurl):
    # Build the URLs of all listing pages
    return [xurl % page for page in xrange(1, 49)]

if __name__ == '__main__':
    jobs = []
    pageurl = getpageurl('http://www.lanrentuku.com/js/p%s.html')
    # Crawl every listing page and spawn one greenlet per download
    for i in pageurl:
        for k in geturllist(i).items():
            jobs.append(gevent.spawn(download, k))
    gevent.joinall(jobs)
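For readers unfamiliar with gevent, the concurrency in the script comes down to three calls: monkey.patch_socket() makes urllib's blocking sockets yield cooperatively, gevent.spawn() creates one greenlet per download, and gevent.joinall() waits for them all to finish. A minimal standalone sketch of that pattern (Python 2, with placeholder URLs rather than the real listing pages) might look like this:

from gevent import monkey
monkey.patch_socket()  # patch the std socket module so blocking I/O cooperates with gevent

import gevent
import urllib

def fetch(url):
    # Each greenlet does an ordinary blocking read; while it waits on the
    # network, the patched socket module lets the other greenlets run.
    resp = urllib.urlopen(url)
    body = resp.read()
    resp.close()
    print '%s -> %d bytes' % (url, len(body))

urls = ['http://www.example.com/', 'http://www.example.org/']  # placeholder URLs
jobs = [gevent.spawn(fetch, u) for u in urls]
gevent.joinall(jobs)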

That is all for this article; I hope you find it useful.
