A small Python crawler that starts from Baidu and keeps following links

The code uses the BeautifulSoup library to parse the HTML documents; since I only extract keywords from the page title, a regular expression could be used instead. It also uses jieba for Chinese word segmentation and chardet to detect the character encoding. I originally intended to make it multithreaded, but I got myself confused and gave up on that.
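As an aside, the title extraction mentioned above could be done with a plain regular expression roughly like this (a minimal sketch; the function name title_text is just for illustration, and html is assumed to be an already decoded HTML string):

import re

def title_text(html):
    # Grab the text between <title> and </title>, tolerating attributes
    # on the tag and titles that span multiple lines.
    match = re.search(r'<title[^>]*>(.*?)</title>', html, re.IGNORECASE | re.DOTALL)
    return match.group(1).strip() if match else None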

The code is as follows:

#coding:utf-8
import re
import urllib
import urllib2
import sys
import time
import Queue
import thread
import threading
import jieba
import chardet
from BeautifulSoup import BeautifulSoup as BS

deep = 1000
lock = threading.Lock()
path = "c:\\test\\"
urlqueue = Queue.Queue()

def pachong():
    # Seed URL: the crawl starts from Baidu.
    url = 'http://www.baidu.com'
    return url

def getpageurl(html):
    # Pull href targets out of <a ...> tags with a case-insensitive regex
    # and push the plausible ones onto the shared URL queue.
    reurl = re.compile(r'<a[^>]*?[hH][rR][eE][fF]\s*=\s*["\']?([^>"\']+)["\']?.*?>')
    urls = reurl.findall(html)
    for url in urls:
        if len(url) > 10:
            if url.find('javascript') == -1:
                urlqueue.put(url)

def getcontents(url):
    try:
        url = urllib.quote(url.split('#')[0].encode('utf-8'), safe="%/:=&?~#+!$,;'@()*[]")
        req = urllib2.urlopen(url)
        res = req.read()
        # Detect the page encoding, then re-encode everything as gb2312.
        code = chardet.detect(res)['encoding']
        res = res.decode(str(code), 'ignore')
        res = res.encode('gb2312', 'ignore')
        return res
    except urllib2.HTTPError, e:
        print e.code
        return None
    except urllib2.URLError, e:
        print str(e)
        return None

def writetofile(html, url):
    fp = file(path + str(time.time()) + '.html', 'w')
    fp.write(html)
    fp.close()

def getkeywords(html):
    code = chardet.detect(html)['encoding']
    if code == 'ISO-8859-2':
        html = html.decode('gbk', 'ignore').encode('gb2312', 'ignore')
    soup = BS(html, fromEncoding="gb2312")
    titletag = soup.title
    titlekeywords = titletag.contents[0]
    cutwords(titlekeywords)

def cutwords(contents):
    # Segment the title with jieba and append the words to a text file.
    print contents
    res = jieba.cut_for_search(contents)
    res = '\n'.join(res)
    print res
    res = res.encode('gb2312')
    keywords = file(path + 'cutkeywords.txt', 'a')
    keywords.write(res)
    keywords.close()

def start():
    while not urlqueue.empty():
        url = urlqueue.get()
        html = getcontents(url)
        if html is None:
            continue
        getpageurl(html)
        getkeywords(html)
        #writetofile(html, url)

if __name__ == '__main__':
    starturl = pachong()
    urlqueue.put(starturl)
    start()
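For reference, the multithreaded variant I gave up on could look roughly like the sketch below (Python 2). It is an assumption, not the original design: several worker threads consume URLs from the same urlqueue and reuse the getcontents, getpageurl and getkeywords functions above; the worker count of 4 is arbitrary.

import threading

def worker():
    # Each worker pulls URLs from the shared queue until the program exits.
    while True:
        url = urlqueue.get()              # blocks until a URL is available
        try:
            html = getcontents(url)
            if html is not None:
                getpageurl(html)
                getkeywords(html)
        finally:
            urlqueue.task_done()          # mark this URL as processed

def start_threaded(numworkers=4):
    for _ in range(numworkers):
        t = threading.Thread(target=worker)
        t.setDaemon(True)                 # daemon threads die with the main thread
        t.start()
    urlqueue.join()                       # wait until every queued URL is handled

Note that getkeywords and cutwords write to shared files, so in a real threaded version the writes would need the lock defined above (or a per-thread file) to avoid interleaved output.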
