Python crawler: parsing JavaScript pages with Scrapy

The code is as follows:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from selenium import selenium
import time


class MySpider(CrawlSpider):
    name = 'cnbeta'
    allowed_domains = ['cnbeta.com']
    start_urls = ['http://www.jb51.net']

    rules = (
        # Extract links matching '/articles/*.htm' and hand each page to
        # parse_page; follow=True keeps crawling links found on those pages.
        Rule(SgmlLinkExtractor(allow=(r'/articles/.*\.htm', )),
             callback='parse_page', follow=True),
    )

    def __init__(self):
        CrawlSpider.__init__(self)
        self.verificationErrors = []
        # Connect to a Selenium RC server on localhost:4444 driving Firefox.
        self.selenium = selenium("localhost", 4444, "*firefox", "http://www.jb51.net")
        self.selenium.start()

    def __del__(self):
        self.selenium.stop()
        print self.verificationErrors

    def parse_page(self, response):
        self.log('Hi, this is an item page! %s' % response.url)
        sel = Selector(response)
        from webproxy.items import WebproxyItem

        # Re-open the URL in the Selenium-controlled browser so its
        # JavaScript actually executes, then wait for the page to settle.
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        time.sleep(2.5)
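
The snippet stops right after the sleep, so nothing is actually extracted yet. As a minimal sketch of how parse_page might continue, assuming a hypothetical WebproxyItem with url and body fields (the class is imported above but never shown in the original), the JavaScript-rendered source can be pulled out of the Selenium RC browser with get_html_source():

        # ... continuing inside parse_page, after time.sleep(2.5):
        # Grab the page source after the browser has run its JavaScript.
        rendered = sel.get_html_source()

        item = WebproxyItem()
        item['url'] = response.url    # hypothetical field
        item['body'] = rendered      # hypothetical field
        return item

The matching webproxy/items.py would then look something like this (again an assumption, since the original never defines it):

from scrapy.item import Item, Field

class WebproxyItem(Item):
    url = Field()
    body = Field()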

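To run this, the Selenium RC server must already be listening on port 4444, because __init__ connects to it as soon as the spider is created. Assuming the standalone server jar (its exact filename varies by release) and a standard Scrapy project layout, the sequence is roughly:

java -jar selenium-server.jar      # start the RC server on port 4444
scrapy crawl cnbeta                # run the spider by its name attribute

Note that both Selenium RC and SgmlLinkExtractor have since been deprecated; on a current stack the same pattern is usually built with LinkExtractor and Selenium WebDriver, but the structure of the spider stays the same.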