A simple Python function for querying Baidu keyword rankings. Features:
1. Random user agent for each request.
2. Simple to use: just call getrank(keyword, domain).
3. Encoding conversion is handled, so encoding should no longer be a problem.
4. Rich results: not only the rank, but also the title, URL and cache (snapshot) date of each result, which fits SEO needs.
Drawbacks:
Single-threaded, so it is slow (a threaded sketch follows the code at the bottom).
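As a quick illustration of point 2: once the code below is loaded, a one-off check is a single call. The keyword here is only an example (the script at the bottom reads its keywords from r.txt instead):

print getrank('douban', 'www.douban.com')   #prints keyword, rank, title, cache date and final url, or '>100' if the domain is not in the top 100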
#coding=utf-8
import requests
from BeautifulSoup import BeautifulSoup   #BeautifulSoup 3 (Python 2)
import re
import random
def decodeanyword(w):   #decode input that may be utf-8 or gb2312 into unicode
    try:
        w.decode('utf-8')
    except:
        w = w.decode('gb2312')
    else:
        w = w.decode('utf-8')
    return w
def createurl(checkword):   #create baidu url with search words
    checkword = checkword.strip()
    checkword = checkword.replace(' ', '+').replace('\n', '')
    baiduurl = 'http://www.baidu.com/s?wd=%s&rn=100' % checkword
    return baiduurl
def getcontent(baiduurl):   #get the content of the serp
    ualist = ['mozilla/4.0+(compatible;+msie+6.0;+windows+nt+5.1;+sv1;+.net+clr+1.1.4322;+tencenttraveler)',
              'mozilla/4.0+(compatible;+msie+6.0;+windows+nt+5.1;+sv1;+.net+clr+2.0.50727;+.net+clr+3.0.4506.2152;+.net+clr+3.5.30729)',
              'mozilla/5.0+(windows+nt+5.1)+applewebkit/537.1+(khtml,+like+gecko)+chrome/21.0.1180.89+safari/537.1',
              'mozilla/4.0+(compatible;+msie+6.0;+windows+nt+5.1;+sv1)',
              'mozilla/5.0+(windows+nt+6.1;+rv:11.0)+gecko/20100101+firefox/11.0',
              'mozilla/4.0+(compatible;+msie+8.0;+windows+nt+5.1;+trident/4.0;+sv1)',
              'mozilla/4.0+(compatible;+msie+8.0;+windows+nt+5.1;+trident/4.0;+gtb7.1;+.net+clr+2.0.50727)',
              'mozilla/4.0+(compatible;+msie+8.0;+windows+nt+5.1;+trident/4.0;+kb974489)']
    headers = {'user-agent': random.choice(ualist)}   #pick a random ua for each request
    iplist = ['202.43.188.13:8080',
              '80.243.185.168:1177',
              '218.108.85.59:81']   #sample proxies; replace with your own working ones
    proxies = {'http': 'http://%s' % random.choice(iplist)}
    r = requests.get(baiduurl, headers = headers, proxies = proxies)
    return r.content
def getlasturl(rawurl):   #get the final url when there are redirects
    r = requests.get(rawurl)
    return r.url
def getatext(atext):   #get the text between <a> and </a>
    pat = re.compile(r'<a.*?>(.*?)</a>')
    match = pat.findall(atext)
    puretext = match[0].replace('<em>', '').replace('</em>', '')   #strip baidu's keyword-highlight tags (assumed to be <em>) from the title
    return puretext
def getcachedate(t):   #get the date of the baidu cache (snapshot)
    pat = re.compile(r'(\d{4}-\d{1,2}-\d{1,2})')   #pull the yyyy-m-d snapshot date out of the span
    match = pat.findall(t)
    cachedate = match[0]
    return cachedate
def getrank(checkword, domain):   #main line
    checkword = checkword.replace('\n', '')
    checkword = decodeanyword(checkword)
    baiduurl = createurl(checkword)
    cont = getcontent(baiduurl)
    soup = BeautifulSoup(cont)
    results = soup.findAll('table', {'class': 'result'})   #find all results in this page
    for result in results:
        checkdata = unicode(result.find('span', {'class': 'g'}))
        if re.compile(r'^[^/]*%s.*?' % domain).match(checkdata):   #todo: tighten this regex
            nowrank = result['id']   #get the rank if the domain info matches
            reslink = result.find('h3').a
            resurl = reslink['href']
            domainurl = getlasturl(resurl)   #follow baidu's redirect to get the target url
            restitle = getatext(unicode(reslink))   #get the title of the target page
            rescache = result.find('span', {'class': 'g'})
            cachedate = getcachedate(unicode(rescache))   #get the cache date of the target page
            res = u'%s, 第%s名, %s, %s, %s' % (checkword, nowrank, restitle, cachedate, domainurl)
            return res.encode('gb2312')   #gb2312 so the result prints correctly on a chinese windows console
    return '>100'   #domain not found in the top 100 results
domain = 'www.douban.com'   #set the domain whose rankings you want to check
f = open('r.txt')   #r.txt holds one keyword per line
for w in f.readlines():
    print getrank(w, domain)
f.close()
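To address the single-threaded drawback mentioned at the top, here is a minimal sketch of how the same getrank function could be driven by a small thread pool from the standard library's multiprocessing.dummy module. The helper name checkone and the pool size of 4 are arbitrary choices; everything else reuses the functions above unchanged, and firing too many concurrent requests at Baidu through the same proxies may get them blocked.

from multiprocessing.dummy import Pool as ThreadPool   #thread-based pool, same api as multiprocessing.Pool

def checkone(w):   #helper so pool.map can pass a single keyword to getrank
    return getrank(w, domain)

pool = ThreadPool(4)   #4 concurrent requests; raise or lower as needed
words = open('r.txt').readlines()
for line in pool.map(checkone, words):   #results come back in the original keyword order
    print line
pool.close()
pool.join()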