Scraper Bremen

Scraper for information about CCTV cameras in Bremen

Copy and paste the "spider" code below into a new file, such as spider-bremen.py. Run it from Terminal with a command like the following:

torsocks -i scrapy runspider spider-bremen.py -o bremen_data.csv
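
The torsocks prefix routes the spider's traffic through a locally running Tor client; without Tor set up, the plain scrapy runspider command works as well. Scrapy itself and Pillow (which Scrapy's images pipeline, used below, needs to process and thumbnail the downloaded images) can be installed with pip, for example:

pip install scrapy Pillow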

Scrapy "spider" file

import scrapy

class ExtractingTheInfeasible(scrapy.Spider):

    ### User variables
    #
    # Page to start crawling from
    start_urls = ['http://www.standorte-videoueberwachung.bremen.de/sixcms/detail.php?gsid=bremen02.c.734.de']
    # Name used to identify the spider
    name = 'bremen_cctv'
    # CSS selector matching one camera entry in the list
    row_selector = 'div.cameras_list_item'
    # CSS selector for the link to the next page of the list
    next_selector = 'ul.pagination:nth-child(4) > li:nth-last-child(2) > a::attr(href)'
    # CSS selectors for the fields extracted from each entry
    item_selectors = {
        'title': '.cameras_title::text',
        'status': '.cameras_title .cameras_status_text::text',
        'address': '.cameras_address::text',
        'image_urls': '.cameras_thumbnail > img::attr(src)'
    }
    #
    ###

    custom_settings = {
        # Optional delay (in seconds) between requests, to crawl more politely
        # 'DOWNLOAD_DELAY': '30',
        # How many "next page" links to follow at most from the start page
        'DEPTH_LIMIT': '11',
        'ITEM_PIPELINES': {
        #   'scrapy.pipelines.files.FilesPipeline': 1,
            # Download the images referenced in 'image_urls'
            'scrapy.pipelines.images.ImagesPipeline': 1
        },
        # Directory the downloaded images (and thumbnails) are written to
        'IMAGES_STORE': 'media',
        'IMAGES_THUMBS': { 'small': (50, 50) },
        # 'FILES_STORE': 'files',
        # Present the spider as a regular desktop browser
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 6.1; rv:45.0) Gecko/20100101 Firefox/45.0',
        'TELNETCONSOLE_ENABLED': False,
        # Disable the unused S3 download handler
        'DOWNLOAD_HANDLERS': {'s3': None}
    }

    def parse(self, response):

        row_selector = self.row_selector
        next_selector = self.next_selector
        item_selectors = self.item_selectors
        # Scheme and host of the current page, used to make relative URLs absolute
        url_prefix = "/".join(response.url.split('/')[:3])

        # Yield one item per camera entry on the page
        for row in response.css(row_selector):
            yields = dict()
            for item, selector in item_selectors.items():
                item_content = row.css(selector).extract_first()
                if item == "image_urls" and item_content:
                    # The images pipeline expects a list of absolute URLs
                    yields[item] = [url_prefix + item_content]
                elif item == "link" and item_content:
                    # Make a relative 'link' field absolute, if one is configured
                    yields[item] = url_prefix + item_content
                else:
                    yields[item] = item_content
            yield yields

        # Follow the pagination (bounded by DEPTH_LIMIT above)
        if next_selector:
            next_page = response.css(next_selector).extract_first()
            if next_page:
                yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
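
Once the crawl finishes, bremen_data.csv holds one row per camera and the downloaded images (plus their thumbnails) end up under media/. A minimal sketch for inspecting the results, assuming the column names that follow from the item_selectors above (title, status, address, image_urls) plus the images column that Scrapy's images pipeline adds:

import csv

# Print a short summary of every scraped camera entry
with open('bremen_data.csv', newline='', encoding='utf-8') as f:
    for row in csv.DictReader(f):
        print(row.get('title'), '|', row.get('status'), '|', row.get('address'))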