From 9fb538feb0989df7bcd3538ae178165cc10cc184 Mon Sep 17 00:00:00 2001
From: Étienne Loks
Date: Sat, 10 Aug 2019 12:28:09 +0200
Subject: Better management of timeout in crawl...

---
 commcrawler/scrapy.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/commcrawler/scrapy.py b/commcrawler/scrapy.py
index bdd28c3..767827a 100644
--- a/commcrawler/scrapy.py
+++ b/commcrawler/scrapy.py
@@ -5,7 +5,7 @@ import requests
 import scrapy
 
 from scrapy.crawler import CrawlerProcess
-from scrapy.exceptions import NotSupported
+from scrapy.exceptions import NotSupported, CloseSpider
 from scrapy.linkextractors import LinkExtractor
 
 from django.conf import settings
@@ -129,15 +129,15 @@ class DefaultSpider:
                 pk=self.crawl_result.pk)
             result.status = "T"
             result.save()
-            self.is_timeout = True
-            return True
+        self.is_timeout = True
+        raise CloseSpider('timeout')
 
     def parse(self, response):
         result = {
             "url": response.url,
         }
         if self.is_timeout or self.timeout():
-            return []
+            raise CloseSpider('timeout')
         for domain in self.excluded_domains:
             if domain in response.url:
                 result["is_online"] = False
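
Note (not part of the patch): the commit replaces plain return values with Scrapy's CloseSpider exception, which, when raised from a spider callback, asks the engine to stop scheduling new requests and shut the spider down with the given reason. Below is a minimal, self-contained sketch of the same pattern; the spider name, start URL and max_seconds budget are illustrative assumptions, not taken from commcrawler.

    import datetime

    import scrapy
    from scrapy.exceptions import CloseSpider


    class TimeoutSpider(scrapy.Spider):
        """Hypothetical spider illustrating a CloseSpider-based timeout."""

        name = "timeout_example"          # assumed name, not from the patch
        start_urls = ["https://example.org"]
        max_seconds = 60                  # assumed crawl budget

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.start_time = datetime.datetime.now()

        def parse(self, response):
            # Raising CloseSpider from a callback makes Scrapy close the
            # spider cleanly instead of silently yielding nothing.
            elapsed = (datetime.datetime.now() - self.start_time).total_seconds()
            if elapsed > self.max_seconds:
                raise CloseSpider("timeout")
            yield {"url": response.url}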