Example No. 1
class URLRoller:
    # Reads the URL list files named in the configuration and cycles through the URLs

    def __init__(self):
        self.file_logger = FileLogger()
        cfg_parser = CfgFileParser()
        self.Config = cfg_parser.file_reader(CONFIGFILE)
        self.Project = self.Config['Collector']['project']
        self.step = int(self.Config[self.Project]['loader'])
        enum_list = []
        for Idx in range(self.step):
            print('in the loop')
            col_data = self.Config[self.Project][str(Idx + 1)]
            # Read one URL per line, normalising whitespace
            with codecs.open(col_data, 'r', 'utf-8') as file_in:
                Cpt = 0
                for Line in file_in:
                    URL = Line[:-1].strip()
                    URL = re.sub(r'\s+', ' ', URL)
                    enum_list.append(URL)
                    Cpt += 1
                    self.file_logger.csv_log('URL %s:' % Cpt, URL)
        self.URLList = enum_list
        self.StartURL = enum_list[0]
        self.URLRoller = itertools.cycle(enum_list)

    def next(self):
        return next(self.URLRoller)
    
    def starter(self):
        return self.StartURL
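
All of the examples in this listing instantiate a FileLogger whose implementation is not shown. As a rough stand-in for readers who want to run the snippets, here is a minimal sketch that simply appends timestamped, comma-separated records to a file; the method name csv_log follows Example No. 1, but the class name, constructor arguments, and behaviour are assumptions, and the real FileLogger in each project differs.

import csv
import datetime


class FileLogger:
    # Hypothetical stand-in: appends timestamped CSV records to a log file
    def __init__(self, path='collector_log.csv'):
        self.path = path

    def csv_log(self, *fields):
        # One row per call: ISO timestamp followed by the given fields
        with open(self.path, 'a', newline='', encoding='utf-8') as handle:
            csv.writer(handle).writerow([datetime.datetime.now().isoformat(), *fields])
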
Example No. 2
class LocationCalculator:
    def __init__(self):
        self.Logger = FileLogger()

    def Locate(self, RetrivedData):
        try:
            self.Logger.Info(RetrivedData)
            return self.GetRSS(RetrivedData)
        except Exception as e:
            self.Logger.Error("ERROR in LocationCalculator: "+str(e))

    def GetRSS(self, RetrivedData):
        # Average the RSS readings per unique beacon UID
        beaconsRSS = []
        for beacon in RetrivedData:
            if not any(b['UID'] == beacon['UID'] for b in beaconsRSS):

                beaconsFound = [x for x in RetrivedData if x['UID'] == beacon['UID']]

                totalRSS = 0
                for b in beaconsFound:
                    if "RSS" in b:
                        totalRSS += int(b['RSS'])
            
                newBeaconRSS = {"UID": beacon['UID'], "RSS": totalRSS/len(beaconsFound)}
                beaconsRSS.append(newBeaconRSS)

        self.Logger.Info(beaconsRSS)
        return beaconsRSS
Example No. 3
class JSONPipeline(object):
    # Defines a class to save the scraped data into a json line file
    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)

        return pipeline

    def spider_opened(self, spider):
        # Open the output file and start exporting data
        self.file_logger = FileLogger()
        self.file_logger.csv_log('PIPELINE', 'open a JSON pipeline')
        # JsonLinesItemExporter writes bytes, so the file is opened in binary mode
        self.file = open('output.json', 'wb')
        self.exporter = JsonLinesItemExporter(self.file)
        self.exporter.start_exporting()

    def spider_closed(self, spider):
        # Stop exporting data and close the file
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        # Export the data scraped by the spider
        self.exporter.export_item(item)
        return item
Example No. 4
 def __init__(self, profile_url='', next=False, *args, **kwargs):
     # Get value for given attributes
     super(ThesesSpider, self).__init__(*args, **kwargs)
     self.start_urls = [profile_url]
     self.Next = next
     # Instantiate a FileLogger object
     self.file_logger = FileLogger()
Example No. 5
 def spider_opened(self, spider):
     # Open the output file and start exporting data
     self.file_logger = FileLogger()
     self.file_logger.csv_log('PIPELINE', 'open a JSON pipeline')
     # JsonLinesItemExporter writes bytes, so the file is opened in binary mode
     self.file = open('output.json', 'wb')
     self.exporter = JsonLinesItemExporter(self.file)
     self.exporter.start_exporting()
Example No. 6
    @staticmethod
    def __getChainOfLoggers():
        # Build the logging chain of responsibility: error -> file -> console
        errorLogger = ErrorLogger(AbstractLogger.ERROR)
        fileLogger = FileLogger(AbstractLogger.DEBUG)
        consoleLogger = ConsoleLogger(AbstractLogger.INFO)

        errorLogger.setNextLogger(fileLogger)
        fileLogger.setNextLogger(consoleLogger)

        return errorLogger
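
Example No. 6 chains three loggers in a Chain of Responsibility, but the AbstractLogger base class and the concrete loggers it relies on are not shown. A minimal sketch of what such a base might look like (an assumption for illustration, not the original code) is:

class AbstractLogger:
    # Log levels used throughout the chain
    DEBUG, INFO, ERROR = 1, 2, 3

    def __init__(self, level):
        self.level = level
        self.next_logger = None

    def setNextLogger(self, next_logger):
        # Link the next handler in the chain
        self.next_logger = next_logger

    def logMessage(self, level, message):
        # Write the message if this logger's level is low enough,
        # then always forward it down the chain
        if self.level <= level:
            self.write(message)
        if self.next_logger is not None:
            self.next_logger.logMessage(level, message)

    def write(self, message):
        raise NotImplementedError


class ConsoleLogger(AbstractLogger):
    def write(self, message):
        print('Console:', message)

With this sketch, a call such as __getChainOfLoggers().logMessage(AbstractLogger.INFO, 'started') would be handled by the file and console loggers and skipped by the error logger.
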
Example No. 7
class MongoPipeline(object):
    # Defines a class to save the scraped data into MongoDB
    
    def spider_opened(self, spider):
        # Get the database parameters from the .cfg file
        cfg_parser = CfgFileParser()
        self.Config = cfg_parser.file_reader(CONFIGFILE)
        # Assign the mongoDB information
        self.file_logger = FileLogger()
        self.Project = self.Config['Collector']['project']
        self.mongo_db = self.Config[self.Project]['mongo_db']
        self.Host = self.Config[self.Project]['host']
        self.Port = int(self.Config[self.Project]['port'])
        self.Collection = self.Config[self.Project]['collection']
        self.DataSet = self.Config[self.Project]['dataset']
        self.mongo_client = MongoClient(self.Host, self.Port)
        self.DBase = self.mongo_client[self.mongo_db]
        self.DBase[self.Collection].create_index([('idx', pymongo.ASCENDING), ('created_at', pymongo.ASCENDING)])
        self.Idx = 0
        Cursor = self.DBase[self.Collection].find({}).sort([('created_at', pymongo.DESCENDING)]).limit(1)
        # Resume the index counter from the most recent record, if any
        for Record in Cursor:
            self.Idx = Record['idx']
        self.file_logger.csv_log('PIPELINE', 'open a MONGO pipeline')

    @classmethod
    def from_crawler(cls, crawler):
        pipeline = cls()
        crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
#         crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
        return pipeline

    def spider_closed(self, spider):
        self.mongo_client.close()

    def process_item(self, item, spider):
        # Export the data scraped by the spider
        # Get the dom as a str from the scrapy.Item
        dom_list = item['dom_tree']
        item_comp = BinaryCompressor.compress(dom_list[0])
        self.Idx += 1
        self.file_logger.csv_log('INFO', 'process_item', self.Idx)
        Record = {'created_at': datetime.datetime.now(),
                  'parsed': False,
                  'profile_url': item['profile_url'],
                  'page_url': item['page_url'],
                  'level': item['level'],
                  'zipdom': item_comp,
                  'dataset': self.DataSet,
                  'idx': self.Idx}
        self.DBase[self.Collection].insert_one(Record)
        return item
Example No. 8
class Extractor(yapdi.Daemon):

    def __init__(self):
        self.file_logger = FileLogger()
        cfg_parser = CfgFileParser()
        self.Config = cfg_parser.file_reader(cfg_file)
        pid_file = self.Config['Extractor']['pid']
        self.Wait = int(self.Config['Extractor']['wait'])
        self.Project = self.Config['Extractor']['project']
        Parser = self.Config[self.Project]['parser']
        # Instantiate the parser class named in the configuration file
        self.Parser = eval(Parser + '()')
        self.Parser.Project = self.Project
        self.Parser.init_DataBase()
        try:
            Restriction = self.Config['Extractor']['restriction']
            self.Restriction = json.loads(Restriction)
            self.file_logger.csv_log('EXTRACTOR', 'Extractor init ok')
        except Exception as exp:
            self.file_logger.csv_log('ERROR', exp)
            raise
        yapdi.Daemon.__init__(self, pid_file)


    def Extracting(self):
        self.file_logger.csv_log('EXTRACTOR', 'start Extracting')

        while True:
            try:
                self.Parser.parse(self.Restriction)
                time.sleep(self.Wait)
            except Exception as exp:
                self.file_logger.csv_log('EXTRACTOR', 'Bug in Extracting():', exp)
Example No. 9
class DataManager:
    def __init__(self):
        self.DBConnection = DBRetriever()
        self.Logger = FileLogger()
        self.Locator = LocationCalculator()
        self.QueryBuilder = QueryBuilder()

    def Manage(self, parameters):
        # BuildQuery
        query = self.QueryBuilder.Build(parameters)

        # IfDeviceID
        if 'DeviceID' in query:
            return self.GetDeviceLocation(query.pop('DeviceID'), query)

        # IfBeaconID
        if 'BeaconID' in query:
            self.Logger.Info('Datamanager: ' + str(query))
            return self.GetBeaconinformation('BeaconInformation', query)

    def GetDeviceLocation(self, deviceID, filter):
        nearByBeacons = self.Querydatabase(deviceID, filter)
        return self.Locator.Locate(nearByBeacons)

    def GetBeaconinformation(self, beaconInformation, filter):
        return self.Querydatabase(beaconInformation, filter)

    def Querydatabase(self, collectionName, filter):
        return self.DBConnection.QueryDatabase(collectionName, filter)
Example No. 10
 def return_result(self, selector: int):
     switcher = {
         1: ConsoleLogger(),
         2: FileLogger(),
         3: DatabaseLogger()
     }
     func = switcher.get(selector)
     return func
Example No. 11
 def __init__(self):
     cfg_parser = CfgFileParser()
     self.Config = cfg_parser.file_reader(CONFIGFILE)
     self.Project = self.Config['Collector']['project']
     self.Drop = self.Config[self.Project]['drop'] == 'True'
     self.Cycle = self.Config[self.Project]['cycle'] == 'True'
     self.Next = self.Config[self.Project]['next'] == 'True'
     self.mongo_db = self.Config[self.Project]['mongo_db']
     self.Host = self.Config[self.Project]['host']
     self.Port = int(self.Config[self.Project]['port'])
     self.Collection = self.Config[self.Project]['collection']
     self.mongo_client = MongoClient(self.Host, self.Port)
     self.DBase = self.mongo_client[self.mongo_db]
     if self.Drop:
         self.DBase[self.Collection].drop()
     self.file_logger = FileLogger()
     self.set_up()
Example No. 12
    def get_log(self, msg, type):
        # Return a logger matching the requested type name
        if type == 'Console':
            return ConsoleLogger(msg)

        elif type == 'File':
            return FileLogger(msg)

        elif type == 'Database':
            return DatabaseLogger(msg)
Example No. 13
    def requestData(self,
                    file_log_name=None,
                    referer=None,
                    x_requested_with=None,
                    content_type=None,
                    **urllibArg):
        try:
            # Build the HTTP request from the given urllib arguments
            req = urllib.request.Request(**urllibArg)
            if referer is not None:
                req.add_header('Referer', referer)
            if x_requested_with is not None:
                req.add_header('X-Requested-With', x_requested_with)
            if content_type is not None:
                req.add_header('Content-Type', content_type)
            print("Request -> " + urllibArg['url'])
            site = self.opener.open(req)

            response_data = None

            if site.info().get('Content-Encoding') == 'gzip':
                response_data = gzip.decompress(site.read())
            else:
                response_data = site.read()

            if file_log_name is not None:
                fileLogger = FileLogger(file_log_name=file_log_name,
                                        data=response_data,
                                        mode="wb")

        except Exception as e:
            response_data = None
            reference = urllibArg['url']
            message = str(urllibArg)
            timestampString = datetime.fromtimestamp(time()).strftime('%b-%H')
            fileLogger = FileLogger(file_log_name=('request-data-' +
                                                   timestampString + '.txt'),
                                    reference=reference,
                                    data=message,
                                    exception=e,
                                    mode="a")
        return response_data
Example No. 14
 def __init__(self):
     self.file_logger = FileLogger()
     cfg_parser = CfgFileParser()
     self.Config = cfg_parser.file_reader(CONFIGFILE)
     self.Agenda = []
     self.Pace = self.Config['Agenda']['pace']
     Date = self.Config['Agenda']['begin']
     self.Begin = datetime.datetime.strptime(Date.strip(), '%Y-%m-%d %H:%M:%S')
     Date = self.Config['Agenda']['end']
     self.End = datetime.datetime.strptime(Date.strip(), '%Y-%m-%d %H:%M:%S')
     self.CycledAgenda = itertools.cycle(self.Agenda)
Example No. 15
class GetHandler:
    def __init__(self):
        self.DataManager = DataManager()
        self.Logger = FileLogger()

    def Handle(self, arguments):
        try:
            self.Logger.Info("GET request received")
            parameters = self.Parse(arguments)
            return self.GetData(parameters)
        except Exception as e:
            self.Logger.Error(e)

    def Parse(self, arguments):
        params = {}
        for key in arguments.keys():
            params[key] = arguments[key].value
        return params

    def GetData(self, parameters):
        return self.DataManager.Manage(parameters)
Example No. 16
 def spider_opened(self, spider):
     # Get the database parameters from the .cfg file
     cfg_parser = CfgFileParser()
     self.Config = cfg_parser.file_reader(CONFIGFILE)
     # Assign the mongoDB information
     self.file_logger = FileLogger()
     self.Project = self.Config['Collector']['project']
     self.mongo_db = self.Config[self.Project]['mongo_db']
     self.Host = self.Config[self.Project]['host']
     self.Port = int(self.Config[self.Project]['port'])
     self.Collection = self.Config[self.Project]['collection']
     self.DataSet = self.Config[self.Project]['dataset']
     self.mongo_client = MongoClient(self.Host, self.Port)
     self.DBase = self.mongo_client[self.mongo_db]
     self.DBase[self.Collection].create_index([('idx', pymongo.ASCENDING), ('created_at', pymongo.ASCENDING)])
     self.Idx = 0
     Cursor = self.DBase[self.Collection].find({}).sort([('created_at', pymongo.DESCENDING)]).limit(1)
     # Resume the index counter from the most recent record, if any
     for Record in Cursor:
         self.Idx = Record['idx']
     self.file_logger.csv_log('PIPELINE', 'open a MONGO pipeline')
Example No. 17
class ThesesSpider(scrapy.Spider):
    # Defines a spider dedicated to crawling Theses.fr
    # works on URL like : http://www.theses.fr/<IDTHESE>
    name = 'PhD'
    allowed_domains = ["Theses.fr"]

    # rules = (Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[@class="pager-next"]',)), callback="parse", follow=True),)

    def __init__(self, profile_url='', next=False, *args, **kwargs):
        # Get value for given attributes
        super(ThesesSpider, self).__init__(*args, **kwargs)
        self.start_urls = [profile_url]
        self.Next = next
        # Instantiate a FileLogger object
        self.file_logger = FileLogger()

    def parse(self, response):
        # Get the page's body HTML
        self.file_logger.csv_log('SPIDER', 'path', '')
        html_dom = response.xpath('//body').extract()
        # Write the HTML DOM into the dom_tree field of a DOM item
        dom = DOM(dom_tree=html_dom,
                  profile_url=self.start_urls[0],
                  page_url=response.url,
                  level=1)
        self.file_logger.csv_log('SPIDER', 'Success', response.url)
        yield dom

    def errback(self, failure):
        # In case of error, log the error's message
        self.file_logger.csv_log('SPIDER', repr(failure), self.start_urls[0])
Example No. 18
class PostHandler:
    def __init__(self):
        self.DBConnection = DBSaver()
        self.Logger = FileLogger()

    def Handle(self, arguments):
        try:
            self.Logger.Info("POST request received")
            beaconsDict = self.Parse(arguments)
            self.Save(beaconsDict)
        except Exception as e:
            self.Logger.Error(e)

    def Parse(self, arguments):
        PostObject = arguments.value
        self.Logger.Info('Object Received: ' + PostObject)
        jsonObject = json.loads(PostObject)
        beaconDataDict = jsonObject['Beacons']
        timestamp = int(time.time())
        self.Logger.Info(timestamp)

        for beaconData in beaconDataDict:
            beaconData["Timestamp"] = timestamp
            self.Logger.Info(beaconData)

        return beaconDataDict

    def Save(self, beaconsDict):
        self.Logger.Info("Saving Data")
        self.DBConnection.SaveBeaconData(beaconsDict)
Example No. 19
class DBRetriever:
    def __init__(self):
        self.connection = MongoDB('mongodb://localhost:27017/', "mydatabase")
        self.Logger = FileLogger()

    def QueryDatabase(self, collectionName, filter):
        self.Logger.Info('CollectionName: ' + collectionName + ' Query: ' +
                         str(filter))
        cursor = self.connection.QueryCollection(collectionName, filter)
        #cursor = self.connection.LoadAllEntries(collectionName)
        return self.ParseCursor(cursor)

    def ParseCursor(self, cursor):
        cursorList = list(cursor)
        parsedData = []
        for beaconCursor in cursorList:
            beaconJSON = dumps(beaconCursor)
            beaconObject = json.loads(beaconJSON)
            parsedData.append(beaconObject)

        self.Logger.Info('ReturnedData: ' + str(parsedData))
        return parsedData
Example No. 20
    def __init__(self, comedy, ip, port=7766):

        # call the super init for threading
        super(ComedyParser, self).__init__()

        # parse comedy
        self._comedy = Comedy(comedy)

        # init FileLogger
        self._logger = FileLogger('log_ComedyParser')

        # init laughing and applauding flag
        self.laughing = None
        self.applauding = None
        self.laughter_counter = 0

        # Set initial after Response mode to NO.
        # This is used to check if the 1sec TTS gap is required
        self._afterResponse = False

        # init the AudienceAnalyser
        self._audienceAnalyser = AudienceAnalyser(expectedPeople,
                                                  self._triggerStartFunction,
                                                  self._triggerStopFunction)

        # start the audience analyser
        self._audienceAnalyser.start()

        # init communication module
        self._communication = CommunicationModule(ip, port)

        # connect, auth and start keep-alive thread
        self._communication.connect()

        # generate all tokens in sentences
        self._tokenIterator = self._generateCombinedTokens()

        # init response iterator
        self._responseTokenIterator = None
Example No. 21
    def __init__(self, expectedPeople, laughStartCallback, laughStopCallback):

        # init FileLogger
        self._logger = FileLogger('log_ShoreFrames')

        # save the callbacks
        self._laughStartCallback = laughStartCallback
        self._laughStopCallback = laughStopCallback

        # init the Audience
        self.audience = Audience(expectedPeople)

        # init ReadShore module
        self._readShore = ReadShore(self._shoreDataReceived)

        # initialise the basic random generator
        random.seed()
        
        # last known audience laugh state
        self.laughStateLast = 'Not Laughing'
Example No. 22
class QueryBuilder:
    def __init__(self):
        self.Logger = FileLogger()

    def Build(self, arguments):
        query = self.GetFilter(arguments)
        return query

    def GetFilter(self, arguments):
        self.Logger.Info(arguments)
        query = {}
        for key in arguments:
            if key == 'Offset':
                #if (int(arguments[key])) == 0:
                #continue
                query['Timestamp'] = {
                    '$gte': (int(time.time()) - (int(arguments[key]) * 60))
                }
                continue

            query[key] = arguments[key]
        return query
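
To make the Offset handling in GetFilter concrete, the self-contained helper below restates the same translation: an Offset given in minutes becomes a '$gte' lower bound on Timestamp, and every other key is copied into the query unchanged. The function name offset_to_query and the sample values are illustrative assumptions, not part of the original project.

import time


def offset_to_query(arguments):
    # Same translation as QueryBuilder.GetFilter: 'Offset' (minutes)
    # becomes a '$gte' bound on 'Timestamp'; other keys pass through.
    query = {}
    for key, value in arguments.items():
        if key == 'Offset':
            query['Timestamp'] = {'$gte': int(time.time()) - int(value) * 60}
            continue
        query[key] = value
    return query


print(offset_to_query({'BeaconID': 'abc123', 'Offset': '5'}))
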
Example No. 23
 def __init__(self):
     self.DataManager = DataManager()
     self.Logger = FileLogger()
Example No. 24
"""
altitude = 1.5
yaw = 0
setpoints = []
setpoints += trajectories.takeoff(initial_pos, altitude, yaw)
setpoints += trajectories.xySquare(2.0, altitude, yaw)
setpoints += trajectories.land(initial_pos, altitude, yaw)
"""
# optitrack data for control
ot_position = np.zeros(3)
ot_attitude = np.zeros(3)

cflib.crtp.init_drivers(enable_debug_driver=False)
cf = Crazyflie(rw_cache='./cache')

# logger setup
flogger = FileLogger(cf, cf_uri, fileName)
flogger.enableConfig('otpos')
flogger.enableConfig('otatt')
flogger.enableConfig('attitude')
flogger.enableConfig('gyros')
flogger.enableConfig('acc')
flogger.enableConfig('twr')
flogger.start()


def OT2NED(vector3D_ot):
    # convert vector from OT coordinates to NED
    vector3D_ned = [0, 0, 0]
    vector3D_ned[0] = vector3D_ot[0]  # NED.x = OT.x
    vector3D_ned[1] = vector3D_ot[2]  # NED.y = OT.z
    vector3D_ned[2] = -vector3D_ot[1]  # NED.z = -OT.y
    return vector3D_ned
Example No. 25
 def __init__(self):
     self.DBConnection = DBRetriever()
     self.Logger = FileLogger()
     self.Locator = LocationCalculator()
     self.QueryBuilder = QueryBuilder()
Example No. 26
 def __init__(self):
     self.connection = MongoDB('mongodb://localhost:27017/', "mydatabase")
     self.Logger = FileLogger()
Example No. 27
 def getLogger(self):
     if self.isFileLoggingEnabled():
         return FileLogger()
     else:
         return ConsoleLogger()
Example No. 28
class AudienceAnalyser:
    ''' Tracks the audience's laughing state from incoming Shore frames '''

    def __init__(self, expectedPeople, laughStartCallback, laughStopCallback):

        # init FileLogger
        self._logger = FileLogger('log_ShoreFrames')

        # save the callbacks
        self._laughStartCallback = laughStartCallback
        self._laughStopCallback = laughStopCallback

        # init the Audience
        self.audience = Audience(expectedPeople)

        # init ReadShore module
        self._readShore = ReadShore(self._shoreDataReceived)

        # initialise the basic random generator
        random.seed()
        
        # last known audience laugh state
        self.laughStateLast = 'Not Laughing'

    def _shoreDataReceived(self, data):

        # read the data
        self.read(data)
        #print data

    def start(self):

        # start the readShore thread
        self._readShore.start()

    def stop(self):
        pass

    def read(self, shoreLine):

        # convert shoreLine into dict
        line = sp.parseLine(shoreLine)

        if ('Frame' in line.keys() and
            'Left' in line.keys() and
            'Top' in line.keys() and
            'Right' in line.keys() and
            'Bottom' in line.keys()):

            # pass the dict to the Audience object
            self.audience.read(line)

            # log frame
            self._logger.log("Shore frame '%d'" % (line['Frame']))
        
            # determine audience laugh state
            
            if self.laughStateLast == 'Not Laughing':
                if self.audience.laughProportion() > 0.333:
                    self.laughStateLast = 'Laughing'
                    self._laughStartCallback()
            elif self.laughStateLast == 'Laughing':
                if self.audience.laughProportion() < 0.111:
                    self.laughStateLast = 'Not Laughing'
                    self._laughStopCallback()
Example No. 29
class GetCommentTask:

    def __init__(self, task, name="extract-comment"):
        self.task = task
        self.name = name
        self.comments = []
        self.log = FL('Extracted Comments {:%Y-%m-%d %H:%M:%S}.txt'.format(datetime.datetime.now()))

    def init_task(self):

        hash_index = 0
        loop = True
        max_index = len(self.task.hashtags)
        next_max_id = ""

        while loop:

            self.task.check_ops_limit()

            current_hash = self.task.hashtags[hash_index]
            self.task.api.getHashtagFeed(current_hash, maxid=next_max_id)

            print ""
            print "CURRENT HASHTAG = " + current_hash
            print ""

            ig_media = self.task.api.LastJson

            if "next_max_id" not in ig_media:
                print "####### Changing hashtag #######"
                hash_index += 1
                next_max_id = ""
                if hash_index >= max_index - 1:
                    break
            else:
                next_max_id = self.do_task(ig_media)

    def do_task(self, ig_media):
        last_max_id = ig_media['next_max_id']

        if "ranked_items" in ig_media:
            key = "ranked_items"
        else:
            key = "items"

        for ig in ig_media[key]:

            self.task.api.getMediaComments(ig["id"])

            for c in reversed(self.task.api.LastJson['comments']):
                txt = c['text']
                if self.check_string(txt):
                    self.comments.append(txt)
                    print "Comment = " + txt.encode('utf-8', 'ignore').decode('utf-8')
                    self.log.add_to_file(txt=txt)

                    self.task.task_count += 1
                    time.sleep(1)

            time.sleep(self.task.get_time_delay())

        return last_max_id

    """""
    Checks if string doesnt contain special non-english characters, @, or Follow Me.
    """""
    def check_string(self,str):
        pattern = re.compile("^(?!follow|followme)[\s\w\d\?><;,\{\}\[\]\-_\+=!\#\$%^&\*\|\']*$")
        return pattern.match(str)
Example No. 30
 def __init__(self, task, name="extract-comment"):
     self.task = task
     self.name = name
     self.comments = []
     self.log = FL('Extracted Comments {:%Y-%m-%d %H:%M:%S}.txt'.format(datetime.datetime.now()))
Example No. 31
 def __init__(self):
     self.Logger = FileLogger()
Example No. 32
class WebPageCollector:
    # Schedules and runs Scrapy crawl jobs according to the configured agenda
    def __init__(self):
        cfg_parser = CfgFileParser()
        self.Config = cfg_parser.file_reader(CONFIGFILE)
        self.Project = self.Config['Collector']['project']
        self.Drop = self.Config[self.Project]['drop'] == 'True'
        self.Cycle = self.Config[self.Project]['cycle'] == 'True'
        self.Next = self.Config[self.Project]['next'] == 'True'
        self.mongo_db = self.Config[self.Project]['mongo_db']
        self.Host = self.Config[self.Project]['host']
        self.Port = int(self.Config[self.Project]['port'])
        self.Collection = self.Config[self.Project]['collection']
        self.mongo_client = MongoClient(self.Host, self.Port)
        self.DBase = self.mongo_client[self.mongo_db]
        if self.Drop:
            self.DBase[self.Collection].drop()
        self.file_logger = FileLogger()
        self.set_up()

    def set_up(self):
        PipeLines = self.Config[self.Project]['pipelines']
        PipeLines = json.loads(PipeLines)
        self.spider_settings = {
            'USER_AGENT':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36 OPR/38.0.2220.41',
            'DOWNLOADER_MIDDLEWARES': {
                'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 543
            },
            'DOWNLOAD_DELAY': 3,
            'RANDOMIZE_DOWNLOAD_DELAY': True,
            'COOKIES_DEBUG': False,
            'COOKIES_ENABLED': True,
            'ITEM_PIPELINES': PipeLines,
            'LOG_LEVEL': 'INFO'
        }
        self.SpiderName = self.Config[self.Project]['spider']
        self.URLRoller = URLRoller()
        if self.Config['Agenda']['pace'] == 'daily':
            self.Agenda = DailyAgenda()
        if self.Config['Agenda']['pace'] == 'weekly':
            self.Agenda = WeeklyAgenda()
        self.file_logger.csv_log('COLLECTOR', 'SetUp is done')

    def Crawl_job(self, URL, Next):
        Runner = CrawlerRunner(settings=self.spider_settings)
        return Runner.crawl(eval(self.SpiderName), profile_url=URL, next=Next)

    def Schedule_next_crawl(self, null, Now):
        self.file_logger.csv_log('COLLECTOR', 'Crawl Session will start')
        if self.Agenda.Begin <= Now < self.Agenda.End:
            delay = self.Agenda.Delay(Now)
            self.file_logger.csv_log('COLLECTOR', 'delayed by', delay)
            reactor.callLater(delay, self.LoopCapturing)
        else:
            self.file_logger.csv_log('COLLECTOR', 'Crawl Session is ended')
            reactor.stop()

    def Capturing(self):
        Now = datetime.datetime.now()
        URL = self.URLRoller.next()
        print(Now.time(), '->', URL)
        Job = self.Crawl_job(URL, self.Next)
        Job.addCallback(self.Schedule_next_crawl, Now)
        Job.addErrback(self.errback)

    def CaptureOnes(self, URL):
        print('->', URL)
        Now = datetime.datetime.now()
        Job = self.Crawl_job(URL, False)
        Job.addCallback(self.Schedule_next_crawl, Now)
        Job.addErrback(self.errback)

    def LoopCapturing(self):
        Now = datetime.datetime.now()
        URL = self.URLRoller.next()
        if URL == self.URLRoller.starter() and not self.Cycle:
            reactor.stop()
        else:
            print(Now.time(), '->', URL)
            Job = self.Crawl_job(URL, self.Next)
            Job.addCallback(self.Schedule_next_crawl, Now)
            Job.addErrback(self.errback)

    def errback(self, failure):
        # Log any crawl failure
        print('ERROR:', failure.value)
        self.file_logger.csv_log('COLLECTOR', failure.value)