""" Topic: 登录爬虫 Desc : 模拟登录https://github.com后将自己的issue全部爬出来 tips:使用chrome调试post表单的时候勾选Preserve log和Disable cache """ import logging import re import sys import scrapy from scrapy.spiders import CrawlSpider, Rule from scrapy.linkextractors import LinkExtractor from scrapy.http import Request, FormRequest, HtmlResponse
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    handlers=[logging.StreamHandler(sys.stdout)])
class GithubSpider(CrawlSpider):
    name = "github"
    allowed_domains = ["github.com"]
    start_urls = [
        'https://github.com/issues',
    ]
    rules = (
        # Follow each issue link on the list page and parse it with parse_page.
        Rule(LinkExtractor(allow=(r'/issues/\d+',),
                           restrict_xpaths='//ul[starts-with(@class, "table-list")]/li/div[2]/a[2]'),
             callback='parse_page'),
        # Follow the pagination ("next page") link; no callback, so these
        # pages are only scanned for more links.
        Rule(LinkExtractor(restrict_xpaths='//a[@class="next_page"]')),
    )
    post_headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36",
        "Referer": "https://github.com/",
    }
    def start_requests(self):
        # Fetch the login page first; the 'cookiejar' meta key tells Scrapy's
        # cookies middleware which session jar to use for this request chain.
        return [Request("https://github.com/login",
                        meta={'cookiejar': 1},
                        callback=self.post_login)]
    def post_login(self, response):
        # Extract the CSRF token from the login form before posting credentials.
        authenticity_token = response.xpath(
            '//input[@name="authenticity_token"]/@value').extract_first()
        logging.info('authenticity_token=' + authenticity_token)
        return [FormRequest.from_response(response,
                                          url='https://github.com/session',
                                          meta={'cookiejar': response.meta['cookiejar']},
                                          headers=self.post_headers,
                                          formdata={
                                              'utf8': '✓',
                                              'login': 'yidao620c',
                                              'password': '******',
                                              'authenticity_token': authenticity_token
                                          },
                                          callback=self.after_login,
                                          dont_filter=True
                                          )]
    def after_login(self, response):
        # Logged in; crawl the real start_urls while reusing the same session jar.
        for url in self.start_urls:
            yield Request(url, meta={'cookiejar': response.meta['cookiejar']})
    def parse_page(self, response):
        """Parse a single issue page; the LinkExtractor rules above follow the
        issue links and the `next page` link automatically."""
        logging.info(u'-------------- message separator -----------------')
        logging.info(response.url)
        issue_title = response.xpath(
            '//span[@class="js-issue-title"]/text()').extract_first()
        logging.info(u'issue_title: ' + issue_title)
    def _requests_to_follow(self, response):
        """Override CrawlSpider._requests_to_follow so that every follow-up
        request also carries the cookiejar of the current response."""
        if not isinstance(response, HtmlResponse):
            return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [l for l in rule.link_extractor.extract_links(response)
                     if l not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                r = Request(url=link.url, callback=self._response_downloaded)
                r.meta.update(rule=n, link_text=link.text,
                              cookiejar=response.meta['cookiejar'])
                yield rule.process_request(r)
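
# A minimal runner sketch (an addition, not part of the original spider):
# it assumes Scrapy's default COOKIES_ENABLED=True so the 'cookiejar' meta
# key works. Inside a Scrapy project you would normally just run
# `scrapy crawl github` instead.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={'ROBOTSTXT_OBEY': False})
    process.crawl(GithubSpider)
    process.start()  # blocks until the crawl finishes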