# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import os
import random
import time

import scrapy
from scrapy import signals
from scrapy.exceptions import IgnoreRequest
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from twisted.internet.error import ConnectionRefusedError
from redisbloom.client import Client

from ShipSpiders.utils.http_utils import http_get


class ShipspidersSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn't have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ShipspidersDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class UserAgentDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
        # Read the UA pool from the project settings; crawler.settings is
        # the canonical way to access them inside from_crawler().
        s.uas = crawler.settings['CUSTOM_USER_AGENT']
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_request(self, request, spider):
        # Assign a randomly chosen User-Agent to every outgoing request.
        # A Scrapy Request always carries a Headers object, so the old
        # `request.headers is None` guard was dead code.
        request.headers['User-Agent'] = random.choice(self.uas)
        return None
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
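

# CUSTOM_USER_AGENT is a project-specific setting consumed by
# UserAgentDownloaderMiddleware above. A minimal sketch of what settings.py
# might contain (the UA strings are illustrative, not from this project):
#
#   CUSTOM_USER_AGENT = [
#       'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
#       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
#   ]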
class MyProxyDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
        s = cls()
        # Initialize the counter and pool here so process_request() never
        # sees an unset attribute, even before spider_opened fires.
        s.request_count = 0
        s.proxyIpList = []
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware. Refresh the proxy pool on the first request and then
        # again after every PER_BATCH_IP_USE_TIMES requests.
        if self.request_count == 0 or self.request_count >= int(spider.settings['PER_BATCH_IP_USE_TIMES']):
            self.request_count = 0
            # Call the self-hosted IP pool API for the current proxy list.
            proxy_service = spider.settings['PROXY_SERVICE']
            self.proxyIpList = http_get(proxy_service + 'get_all')
            # Retry until the pool returns at least one proxy.
            while len(self.proxyIpList) == 0:
                time.sleep(1)
                self.proxyIpList = http_get(proxy_service + 'get_all')
        proxy = random.choice(self.proxyIpList)
        request.meta['proxy'] = 'http://%s' % proxy['proxy']
        self.request_count += 1
        return None
    def process_exception(self, request, exception, spider):
        # If the proxy refuses the connection, evict it from the pool and
        # return the request so it is retried through a different proxy.
        if isinstance(exception, ConnectionRefusedError):
            proxy_service = spider.settings['PROXY_SERVICE']
            # request.meta['proxy'] is 'http://ip:port'; keep only 'ip:port'.
            http_get(proxy_service + 'delete?proxy=' + request.meta['proxy'].split('/')[-1])
            return request
        return None
def spider_opened(self, spider):
self.request_count = 0
spider.logger.info('Spider opened: %s' % spider.name)
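

# MyProxyDownloaderMiddleware assumes a self-hosted proxy-pool service
# reachable at the PROXY_SERVICE base URL, and that http_get() returns the
# parsed JSON body. The endpoint names come from the calls above; the
# response shape is an assumption inferred from how proxy['proxy'] is used:
#
#   GET {PROXY_SERVICE}get_all
#       -> [{'proxy': '1.2.3.4:8080'}, {'proxy': '5.6.7.8:3128'}, ...]
#   GET {PROXY_SERVICE}delete?proxy=1.2.3.4:8080
#       -> evicts that proxy from the pool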
class DumpFilterDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
        redisHost = crawler.settings['REDIS_HOST']
        redisPort = crawler.settings['REDIS_PORT']
        pwd = crawler.settings['REDIS_PWD']
        # RedisBloom client shared across requests as a persistent
        # duplicate filter.
        s.bloomFilter = Client(host=redisHost, port=redisPort, password=pwd)
        s.deepth = crawler.settings['CRAWL_DEEPTH']
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_request(self, request, spider):
        # Duplicate check: drop any request whose filter value is already
        # recorded in the Bloom filter.
        if 'CUSTOM_FILTER_KEY' in request.meta and 'CUSTOM_FILTER_VAL' in request.meta:
            key = request.meta['CUSTOM_FILTER_KEY']
            val = request.meta['CUSTOM_FILTER_VAL']
            if self.bloomFilter.bfExists(key, val) == 1:
                spider.logger.info("[KEY EXISTED] '%s' exists! Skip to next request." % val)
                raise IgnoreRequest()
        return None
def process_exception(self, request, exception, spider):
return None
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
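

# A minimal sketch of how a spider opts a request into the duplicate filter
# above (the filter key and URL are illustrative, not from this project):
#
#   yield scrapy.Request(
#       url='https://example.com/ship/12345',
#       meta={
#           'CUSTOM_FILTER_KEY': 'ship:detail',  # Bloom filter name in Redis
#           'CUSTOM_FILTER_VAL': '12345',        # value checked by bfExists
#       },
#       callback=self.parse_detail,
#   )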
class DumpFiliterSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
        redisHost = crawler.settings['REDIS_HOST']
        redisPort = crawler.settings['REDIS_PORT']
        pwd = crawler.settings['REDIS_PWD']
        s.bloomFilter = Client(host=redisHost, port=redisPort, password=pwd)
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_spider_output(self, response, result, spider):
        for i in result:
            # Only once an item has actually been extracted is the page
            # recorded in the duplicate filter, so failed pages can be
            # retried on a later run.
            if isinstance(i, scrapy.Item):
                if 'CUSTOM_FILTER_KEY' in response.request.meta and 'CUSTOM_FILTER_VAL' in response.request.meta:
                    key = response.request.meta['CUSTOM_FILTER_KEY']
                    val = response.request.meta['CUSTOM_FILTER_VAL']
                    # bfAdd returns 1 when the value is new and 0 when it
                    # already exists; drop duplicate items.
                    if self.bloomFilter.bfAdd(key, val) <= 0:
                        continue
                    spider.logger.info("[KEY ADDED] '%s' added." % val)
            yield i
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
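

# To enable these middlewares, the project's settings.py would contain
# something like the following. Only the class names come from this file;
# the module path and priority numbers are assumptions for illustration:
#
#   DOWNLOADER_MIDDLEWARES = {
#       'ShipSpiders.middlewares.UserAgentDownloaderMiddleware': 400,
#       'ShipSpiders.middlewares.MyProxyDownloaderMiddleware': 410,
#       'ShipSpiders.middlewares.DumpFilterDownloaderMiddleware': 420,
#   }
#   SPIDER_MIDDLEWARES = {
#       'ShipSpiders.middlewares.DumpFiliterSpiderMiddleware': 500,
#   }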