def deleteConnection(self, connection):
        # Delete referenced properties first
        db.delete(
            GaeConnectionModel.last_active.get_value_for_datastore(
                connection.model))
        db.delete(
            GaeConnectionModel.connected.get_value_for_datastore(
                connection.model))
        db.delete(
            GaeConnectionModel.last_polled.get_value_for_datastore(
                connection.model))

        # Optional referenced properties
        authentication_key = GaeConnectionModel.authentication.get_value_for_datastore(
            connection.model)
        if authentication_key is not None:
            db.delete(authentication_key)

        session_key = GaeConnectionModel.session.get_value_for_datastore(
            connection.model)
        if session_key is not None:
            db.delete(session_key)

        # Delete connection
        connection.model.delete()
        connection.model = None
        ConnectionManager.deleteConnection(self, connection)

        db.run_in_transaction(self._decrementChannelCount,
                              connection.channel_name)
    def __init__(self, connection_class=Connection, connection_params=None,
        mc_servers=['127.0.0.1:11211'], mc_debug=0):
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        self.mc = self.createMcClient(mc_servers, mc_debug)
        self._lock = memcache_manager.MemcacheMutex(self.mc)
class UpdateH5iManager:

	def __init__(self, connectionStr):
		self.connMgr = ConnectionManager()
		self.conn = self.connMgr.connectToDatabase(connectionStr)
		self.cur = self.conn.cursor()

	def updateDatasetMarkerH5Index(self, datasetId, markerId, idx):
		print ("Updating %s:%s:%s ..." % (datasetId, markerId, idx))
		updateStmt = "update dataset_marker set marker_idx=%s where dataset_id=%s and marker_id=%s;"
		self.cur.execute(updateStmt, (idx, datasetId, markerId))

	def updateDatasetDnarunH5Index(self, datasetId, sampleId, idx):
		print ("Updating %s:%s:%s ..." % (datasetId, sampleId, idx))
		updateStmt = "update dataset_dnarun set dnarun_idx=%s where dataset_id=%s and dnarun_id=%s;"
		self.cur.execute(updateStmt, (idx, datasetId, sampleId))

	def updateSerialSequence(self, tableName, primaryKeyColumnName):
		updateSeqSql = "SELECT pg_catalog.setval(pg_get_serial_sequence('"+tableName+"', '"+primaryKeyColumnName+"'), MAX("+primaryKeyColumnName+")) FROM "+tableName+";"
		#print("updateSeqSql = "+updateSeqSql)
		self.cur.execute(updateSeqSql)

	def commitTransaction(self):
		self.conn.commit()

	def rollbackTransaction(self):
		self.conn.rollback()

	def closeConnection(self):
		self.connMgr.disconnectFromDatabase()
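
A minimal usage sketch for the class above, assuming ConnectionManager wraps a PostgreSQL driver and accepts a libpq-style connection string; the connection string, ids, and indexes are placeholder values, not from the original project.

# Hypothetical caller: update a few HDF5 indexes, then commit or roll back.
mgr = UpdateH5iManager("host=localhost dbname=gobii user=appuser")
try:
    mgr.updateDatasetMarkerH5Index(12, 345, 7)
    mgr.updateDatasetDnarunH5Index(12, 99, 3)
    mgr.commitTransaction()
except Exception:
    mgr.rollbackTransaction()
    raise
finally:
    mgr.closeConnection()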
    def deleteConnection(self, connection):
        d = self.connections.delete().\
            where(self.connections.c.id==connection.id)
        db = self.getDb()
        db.execute(d)
        db.close()

        ConnectionManager.deleteConnection(self, connection)
Example #7
    def notice_slaves_stop(self):
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        screen_list = list(self.slave_manager.slave_dict.keys())
        for screen in screen_list:
            # Fill the placeholder with the target screen name.
            task_connection.publish('STOP {}'.format(screen))
        # task_connection.broadcast_task('STOP {}')
        task_connection.stop()
    def __init__(self, engine, metadata, connection_class=Connection, connection_params=None,
                 table_prefix=''):
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        self.engine = engine
        self.metadata = metadata
        self.table_prefix = "%s_" % table_prefix.rstrip('_') if table_prefix else table_prefix
        self.mapTables()
    def __init__(self,
                 connection_class=GaeConnection,
                 connection_params=None,
                 touch_time=10000):
        ConnectionManager.__init__(self,
                                   connection_class=connection_class,
                                   connection_params=connection_params)

        # Reduces number of writes to 'last_active' field.
        self.touch_time = touch_time
Example #11
    def __init__(self):
        LOG.debug("JellyfinClient initializing...")

        self.config = Config()
        self.http = HTTP(self)
        self.wsc = WSClient(self)
        self.auth = ConnectionManager(self)
        self.jellyfin = api.API(self.http)
        self.callback_ws = callback
        self.callback = callback
Example #12
    def __get__(self, instance, owner):
        class_name = _class_name(owner)
        config_name = owner.config_name
        config_name = config_name if config_name else class_name

        config_col = ConnectionManager.get_config(config_name).col
        if not config_col:
            collection_name = owner.collection_name
            document_name = collection_name if collection_name else class_name
            return ConnectionManager.get_config(config_name).db[document_name]

        return config_col
Example #14
    def __init__(self, main_window, port, service_id):
        super().__init__()
        self.main_window = main_window
        #ConnectionManager (create Slots)
        self.connection_manager = ConnectionManager(port, service_id)
        self.message_manager = MessageManager(self.connection_manager,
                                              service_id)
        self.message_manager.signal_message_received.connect(self.print_string)
        self.message_manager.signal_election_responded.connect(
            self.set_election)
        self.message_manager.signal_leader_responded.connect(
            self.leader_has_been_found)
        self.message_manager.signal_has_message_to_send.connect(
            self.send_udp_message)
        self.message_manager.signal_leader_alive.connect(self.set_alive)

        data = {"port": str(port), "leader": "Null"}
        jdata = json.dumps(data)
        self.main_window.set_details(jdata)
        self.main_window.signal_has_message_to_send.connect(
            self.send_udp_message)
        self.main_window.signal_has_multicast_to_send.connect(
            self.send_multicast_message)
        self.main_window.signal_start_checking.connect(self.close)

        self.own_port = port
        self.service_id = service_id
        self.leader_ip = None
        self.leader_port = 0
        self.leader_id = " "
        self.election = True
        self.leader_alive = False
        self.iam_leader = False

        self.control_boolean = False
        self.alive_checker_thread = AliveChecker(self, self.leader_ip,
                                                 self.leader_port,
                                                 self.connection_manager)
        self.alive_checker_thread.signal_election_is_due.connect(
            self.do_election)
        self.message_manager.signal_received_ok.connect(self.receive_ok)
        self.message_manager.signal_critical_request.connect(
            self.receive_request)

        self.worker_controller = WorkerController(1)
        self.worker_controller.signal_request_critical.connect(
            self.request_critical)
        self.worker_controller.signal_send_free_message.connect(
            self.send_free_message)
        #do first multicast
        self.th = th.Thread(target=self.request_group)
        self.th.start()
Example #15
    def __init__(self, port):
        super(AWSIOTDeviceAdapter, self).__init__()

        self.set_config('default_timeout', 5.0)

        reg = ComponentRegistry()
        endpoint = reg.get_config('awsiot-endpoint')
        rootcert = reg.get_config('awsiot-rootcert')
        iamuser = reg.get_config('awsiot-iamkey')
        iamsecret = reg.get_config('awsiot-iamtoken')
        iamsession = reg.get_config('awsiot-session', default=None)

        args = {
            'endpoint': endpoint,
            'root_certificate': rootcert,
            'use_websockets': True,
            'iam_key': iamuser,
            'iam_secret': iamsecret,
            'iam_session': iamsession,
        }

        self._logger = logging.getLogger(__name__)

        # Port should be a topic prefix that allows us to connect
        # only to subset of IOTile devices managed by a gateway
        # rather than to directly accessible iotile devices.
        if port is None:
            port = ""

        if len(port) > 0 and port[-1] != '/':
            port = port + '/'

        self.client = OrderedAWSIOTClient(args)
        self.name = str(uuid.uuid4())
        self.client.connect(self.name)
        self.prefix = port

        self.conns = ConnectionManager(self.id)
        self.conns.start()

        self.client.subscribe(self.prefix + 'devices/+/data/advertisement',
                              self._on_advertisement,
                              ordered=False)

        self._deferred = queue.Queue()

        self.set_config('minimum_scan_time', 5.0)
        self.set_config('probe_supported', True)
        self.set_config('probe_required', True)
        self.mtu = self.get_config(
            'mtu', 60 * 1024)  # Split script payloads larger than this
Example #16
class RCScreenApp(App):
    """
    Responsible for the whole client's application.
    """
    screen_image_format = StringProperty(DEFAULT_SCREEN_IMAGE_FORMAT)
    username = StringProperty("")
    password = StringProperty("")
    # TODO: can connect_screen handle this?
    is_controller = BooleanProperty(True)
    # TODO: can connect_screen handle this?
    partner = StringProperty("")
    connection_manager = ObjectProperty(ConnectionManager())
    x_sensitivity = NumericProperty(10, min=0)
    y_sensitivity = NumericProperty(10, min=0)
    screen_size = ListProperty()
    other_screen_width = NumericProperty(0)
    other_screen_height = NumericProperty(0)

    def on_stop(self):
        """
        Close connection_manager
        """
        if self.connection_manager.running:
            self.connection_manager.close()

    def build(self):
        """
        Called when the app is created.
        """
        try:
            self.icon = ICON_PATH
        except Exception as e:
            print(e)
        return super().build()
Example #17
    def setUp(self):
        httpretty.enable()
        create_environment()
        make_fake_dir()
        with open(CONFIG_FILEPATH, 'r') as fo:
            self.cfg = json.load(fo)

        self.auth = (self.cfg['user'], self.cfg['pass'])
        self.base_url = ''.join(
            [self.cfg['server_address'], self.cfg['api_suffix']])
        self.files_url = ''.join([self.base_url, 'files/'])
        self.actions_url = ''.join([self.base_url, 'actions/'])
        self.shares_url = ''.join([self.base_url, 'shares/'])
        self.user_url = ''.join([self.base_url, 'users/'])

        self.cm = ConnectionManager(self.cfg)
Example #18
    def __init__(self):
        self.logger = logging.getLogger('backdoor')
        self.logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
        self.logger.info('Starting backdoor.')
        signal.signal(signal.SIGINT, self.stop)
        self.running = True
        self.connection_manager = ConnectionManager(config.api_host, config.api_port)
        self.connection_manager.start()
        self.methods = {}
        self.services = []
        self.load_services()
Example #19
class LoadIfileManager:

	def __init__(self, connectionStr):
		self.connMgr = ConnectionManager()
		self.conn = self.connMgr.connectToDatabase(connectionStr)
		self.cur = self.conn.cursor()
		self.fdm = ForeignDataManager()
		#print("Load IFile Manager Initialized.")

	def dropForeignTable(self, fdwTableName):
		self.cur.execute("drop foreign table if exists "+fdwTableName+";")
		#print("drop foreign table if exists "+fdwTableName+";")

	def createForeignTable(self, iFile, fTableName):
		header, fdwScript = self.fdm.generateFDWScript(iFile, fTableName)
		self.cur.execute(fdwScript)
		return header

	def createFileWithDerivedIds(self, outputFilePath, derivedIdSql):
		copyStmt = "copy ("+derivedIdSql+") to '"+outputFilePath+"' with delimiter E'\\t'"+" csv header;"
		#print("copyStmt = "+copyStmt)
		self.cur.execute(copyStmt)

	def createFileWithoutDuplicates(self, outputFilePath, noDupsSql):
		copyStmt = "copy ("+noDupsSql+") to '"+outputFilePath+"' with delimiter E'\\t'"+" csv header;"
		#print("copyStmt = "+copyStmt)
		self.cur.execute(copyStmt)

	def loadData(self, tableName, header, fileToLoad, primaryKeyColumnName):
		loadSql = "copy "+tableName+" ("+(",".join(header))+")"+" from '"+fileToLoad+"' with delimiter E'\\t' csv header;"
		#print("loadSql = "+loadSql)
		self.updateSerialSequence(tableName, primaryKeyColumnName)
		self.cur.execute(loadSql)

	def updateSerialSequence(self, tableName, primaryKeyColumnName):
		updateSeqSql = "SELECT pg_catalog.setval(pg_get_serial_sequence('"+tableName+"', '"+primaryKeyColumnName+"'), MAX("+primaryKeyColumnName+")) FROM "+tableName+";"
		#print("updateSeqSql = "+updateSeqSql)
		self.cur.execute(updateSeqSql)

	def commitTransaction(self):
		self.conn.commit()

	def rollbackTransaction(self):
		self.conn.rollback()

	def closeConnection(self):
		self.connMgr.disconnectFromDatabase()
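
A sketch of the intended load flow for the class above, assuming generateFDWScript returns a (header, script) pair as used in createForeignTable; the connection string, table names, file paths, and SQL are placeholders.

# Hypothetical end-to-end load through a foreign table.
ldr = LoadIfileManager("host=localhost dbname=gobii user=loader")
try:
    ldr.dropForeignTable("marker_fdw")
    header = ldr.createForeignTable("/data/ifiles/marker.ifile", "marker_fdw")
    ldr.createFileWithoutDuplicates("/tmp/marker_nodups.tsv",
                                    "select distinct * from marker_fdw")
    ldr.loadData("marker", header, "/tmp/marker_nodups.tsv", "marker_id")
    ldr.commitTransaction()
except Exception:
    ldr.rollbackTransaction()
    raise
finally:
    ldr.closeConnection()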
Example #20
    def __init__(self, my_port, node_host=None, node_port=None):
        self.my_ip = self.__get_myip()
        print('Server IP address is set to ... ', self.my_ip)
        self.my_port = my_port
        self.cm = ConnectionManager(self.my_ip, self.my_port, self.__handle_message)
        self.node_host = node_host
        self.node_port = node_port
        self.bm = BlockchainManager()
        self.tp = TransactionPool()

        self.clock = 0
        self.status = STATUS_IDLE
        self.next_status = STATUS_IDLE
        self.new_txs = []
        self.end_mining_clock = None
        self.new_block = None
        self.to_port = None
Example #21
class JellyfinClient(object):

    logged_in = False

    def __init__(self):
        LOG.debug("JellyfinClient initializing...")

        self.config = Config()
        self.http = HTTP(self)
        self.wsc = WSClient(self)
        self.auth = ConnectionManager(self)
        self.jellyfin = api.API(self.http)
        self.callback_ws = callback
        self.callback = callback

    def set_credentials(self, credentials=None):
        self.auth.credentials.set_credentials(credentials or {})

    def get_credentials(self):
        return self.auth.credentials.get_credentials()

    def authenticate(self, credentials=None, options=None):

        self.set_credentials(credentials or {})
        state = self.auth.connect(options or {})

        if state['State'] == CONNECTION_STATE['SignedIn']:

            LOG.info("User is authenticated.")
            self.logged_in = True
            self.callback("ServerOnline", {'Id': self.auth.server_id})

        state['Credentials'] = self.get_credentials()

        return state

    def start(self, websocket=False, keep_alive=True):

        if not self.logged_in:
            raise ValueError("User is not authenticated.")

        self.http.start_session()

        if keep_alive:
            self.http.keep_alive = True

        if websocket:
            self.start_wsc()

    def start_wsc(self):
        self.wsc.start()

    def stop(self):

        self.wsc.stop_client()
        self.http.stop_session()
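
A hypothetical driver for the client above. The credentials payload and the options dict are assumptions; only the call order (set credentials, authenticate, start, stop) comes from the class itself.

# Illustrative only: an empty credentials dict triggers the normal
# connect flow; real payloads come from auth.credentials.
client = JellyfinClient()
state = client.authenticate(credentials={}, options={})
if client.logged_in:
    client.start(websocket=True, keep_alive=True)
    # ... interact via client.jellyfin (the api.API wrapper) ...
    client.stop()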
    def deleteConnection(self, connection):
        lock_name = self.getLockName(self.CONNECTIONS_ATTR)
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            for i, connection_id in enumerate(connection_ids):
                if connection_id == connection.id:
                    connection_ids.pop(i)
                    break
            self.mc.set(self.CONNECTIONS_ATTR, connection_ids)
        finally:
            self._lock.release(lock_name)

        keys = [self.getKeyName(connection.id, attr) for attr in self.ATTRIBUTES]
        self.mc.delete_multi(keys)

        self.decrementChannelCount(connection.channel_name)

        ConnectionManager.deleteConnection(self, connection)
Example #25
def Initialize(port, functionMap={}, asyncHandler=None):
    '''
    
    Initializes BlackBoard with the corresponding parameters.
    
    :param int port: The port through which BlackBoard will communicate with this module.
    :param dictionary functionMap: A dictionary containing **key:value** pairs, where the *key* is the name of a command received (a string),
        and the *value* is either a tuple containing a function as a first element and a boolean as a second element, or a function.
        The function in both cases is the function that is going to execute the specified command and receives an object of type :class:`Command` (See :ref:`Creating a command handler <creating_a_command_handler>`).
        The boolean value indicates whether the execution of that command should be synchronous (on the same thread) or asynchronous,
        usually synchronous execution is preferred for fast commands that can answer almost immediately and asynchronous for commands that might take a little time.
        When the value is only a function, by default the execution is synchronous. *functionMap* can also contain an entry with a string containing only an asterisk,
        meaning that would be the handler in case no other handler is found for a specific command.
        
        .. note::

            Notice that although functionMap can include a wildcard handler and this might seem like the module could answer
            anything, BlackBoard will only send commands that are registered under this module's configuration.
        
    :param function asyncHandler: A function that would handle the response of commands when sent with the method :func:`Send`
        instead of using :func:`SendAndWait`. This means the execution of a program that sends a command could continue
        and an asynchronous handler would handle the response when one is received.

        .. note::
    
            Notice that the asyncHandler functionality could also be achieved using a :class:`ParallelSender` object,
            but it has other implications.
    
    '''
    global _executors, _connMan, _parser, _p, _initialized, _ready

    _executors = {
        'busy': (lambda x: Response('busy'), False),
        'ready': (_isReady, False),
        'alive': (lambda x: Response('alive', True), False)
    }

    for m in functionMap:
        if isinstance(functionMap[m], types.FunctionType):
            _executors[m] = (functionMap[m], False)
        elif isinstance(functionMap[m], tuple):
            _executors[m] = functionMap[m]
        else:
            print('Element in function map is neither a function nor a valid tuple: ' +
                  repr(functionMap[m]))

    _connMan = ConnectionManager(port)
    _parser = CommandParser(asyncHandler)

    _p = threading.Thread(target=_MainThread)
    _p.daemon = True

    _initialized = True
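
A minimal registration sketch for the API described in the docstring above. The handler names and port are invented, and the Response arguments are inferred from the 'busy'/'alive' defaults in _executors; True in the tuple is assumed to request asynchronous execution, matching the False used for the fast built-in handlers.

# Hypothetical module setup; 'cmd' is the Command object mentioned in
# the docstring above.
def say_hello(cmd):
    return Response('say_hello', True)

def slow_task(cmd):
    return Response('slow_task', True)

Initialize(2000, {
    'say_hello': say_hello,          # plain function: runs synchronously
    'slow_task': (slow_task, True),  # tuple: True requests async execution
})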
    def __init__(self):
        RegexMatchingEventHandler.__init__(self, ignore_regexes=Daemon.IGNORED_REGEX, ignore_directories=True)

        # Just initialize variables; Daemon.start() does the rest
        self.daemon_state = 'down'  # TODO implement the daemon state (disconnected, connected, synchronizing, ready...)
        self.running = 0
        self.client_snapshot = {}  # EXAMPLE {'<filepath1>': ['<timestamp>', '<md5>'], '<filepath2>': ...}
        self.local_dir_state = {}  # EXAMPLE {'last_timestamp': '<timestamp>', 'global_md5': '<md5>'}
        self.listener_socket = None
        self.observer = None
        self.cfg = self.load_cfg(Daemon.CONFIG_FILEPATH)
        self.init_sharing_path()
        self.conn_mng = ConnectionManager(self.cfg)
Example #27
    def __init__(self, cfg_path=None, sharing_path=None):
        FileSystemEventHandler.__init__(self)
        # Just initialize variables; Daemon.start() does the rest
        self.daemon_state = 'down'  # TODO implement the daemon state (disconnected, connected, synchronizing, ready...)
        self.running = 0
        self.client_snapshot = {}  # EXAMPLE {'<filepath1>': ['<timestamp>', '<md5>'], '<filepath2>': ...}
        self.local_dir_state = {}  # EXAMPLE {'last_timestamp': '<timestamp>', 'global_md5': '<md5>'}
        self.listener_socket = None
        self.observer = None
        self.cfg = self._load_cfg(cfg_path, sharing_path)
        self.password = self._load_pass()
        self._init_sharing_path(sharing_path)

        self.conn_mng = ConnectionManager(self.cfg)
def main():
    config = ConfigParser()
    config.read("config.ini")

    cm_config = {
        "host": config.get("SQL", "host_address"),
        "port": config.getint("SQL", "port"),
        "user": "",
        "passwd": "",
        "database": config.get("SQL", "database"),
    }

    cm = ConnectionManager(cm_config)
    ui = UserInterface(cm)
    try:
        ui.start()
    except Exception as e:
        print(e)
Example #31
    def __init__(self, conf_file):
        self.config = ConfigParser.ConfigParser(allow_no_value=True)
        self.clean_time_gap = None
        self.wait_time_for_slave = None
        self.master_queue_name = None
        self.task_queue_name = None
        self.task_queue_size_limit = None
        self.task_file_name = None
        self.task_counter_file = None
        self.ssh_key = None
        self.s3_bucket = None
        self.s3_folder = None
        self.slave_num_every_packup = None
        self.slave_max_sec_each_task = None
        self.slave_python_version = None
        self.master_ip = None
        self.slaves_ip = None
        self.slave_awake_frequency = None
        self.configure(conf_file)

        self.last_wake_time = None

        self.repeated_timer = None
        self.is_started = False
        self.pop_forever_handler = None

        logging.info('Starting task manager...')
        self.task_manager = TaskManager(self.task_file_name, self.task_counter_file)
        logging.info('Starting slave manager...')
        self.slave_manager = SlaveManager(master_ip=self.master_ip,
                                          slaves_ip=self.slaves_ip,
                                          ssh_key=self.ssh_key,
                                          s3_bucket=self.s3_bucket,
                                          s3_folder=self.s3_folder,
                                          slave_num_every_packup=self.slave_num_every_packup,
                                          slave_max_sec_each_task=self.slave_max_sec_each_task,
                                          slave_python_version=self.slave_python_version,
                                          slave_awake_frequency=self.slave_awake_frequency,
                                          slave_buffer_size=1)
        logging.info('Starting connection manager...')
        self.message_connection = ConnectionManager(queue_name=self.master_queue_name,
                                                    durable=False,
                                                    callback=self.msg_callback,
                                                    no_ack=True)
def main():
    main_app = FastAPI()

    manager = ConnectionManager()

    @main_app.websocket("/ws/{client_id}")
    async def websocket_endpoint(websocket: WebSocket, client_id: UUID4):
        await manager.connect(websocket, client_id)
        try:
            while True:
                ws_data = await websocket.receive_text()
                message = Message(sender_id=client_id, **json.loads(ws_data))
                print(f'User {message.sender_id} sends to user {message.receiver_id} the message {message.text}')
                await manager.send_personal_message(message)
                await manager.send_message_to_target(message)
        except WebSocketDisconnect:
            manager.disconnect(websocket, client_id)

    return main_app
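
A sketch of serving the factory above; uvicorn is an assumption about the ASGI server, not part of the original snippet.

# Hypothetical runner for the WebSocket chat app returned by main().
import uvicorn

app = main()

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)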
Example #34
    def start_popping_tasks(self):
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        eof_reached = False
        while self.is_started and not eof_reached:
            current_task_queue_size = task_connection.get_task_queue_size()
            while self.is_started and current_task_queue_size < self.task_queue_size_limit:
                task = self.task_manager.pop_task()
                if task is None:
                    # TODO: Don't use Error. Just break and handle the case later in this function
                    logging.info('EOF Reached')
                    eof_reached = True
                    break
                message = 'WORK ' + ujson.dumps(task)
                task_connection.publish(message)
                current_task_queue_size += 1

        task_connection.stop()
Example #35
    def __get__(self, instance, owner):
        config_name = owner.config_name

        config_name = config_name if config_name else _class_name(owner)
        return getattr(ConnectionManager.get_config(config_name),
                       self.attr_name)
class TestConnectionManager(unittest.TestCase):
    CONFIG_DIR = os.path.join(os.environ['HOME'], '.PyBox')
    CONFIG_FILEPATH = os.path.join(CONFIG_DIR, 'daemon_config')
    LOCAL_DIR_STATE_PATH = os.path.join(CONFIG_DIR, 'dir_state')

    def setUp(self):
        httpretty.enable()

        with open(TestConnectionManager.CONFIG_FILEPATH, 'r') as fo:
            self.cfg = json.load(fo)

        self.auth = (self.cfg['user'], self.cfg['pass'])
        # override
        self.cfg['server_address'] = "http://www.pyboxtest.com"
        self.cfg['sharing_path'] = os.path.join(os.getcwd(), "sharing_folder")


        # auth server address used for testing
        self.authServerAddress = "http://" + self.cfg['user'] + ":" + self.cfg['pass'] + "@www.pyboxtest.com"
        # example of self.base_url = 'http://localhost:5000/API/V1/'
        self.base_url = ''.join([self.cfg['server_address'], self.cfg['api_suffix']])
        self.files_url = ''.join([self.base_url, 'files/'])
        self.actions_url = ''.join([self.base_url, 'actions/'])
        self.shares_url = ''.join([self.base_url, 'shares/'])

        self.cm = ConnectionManager(self.cfg)
        self.make_fake_dir()

    # files:
    @httpretty.activate
    def test_download_normal_file(self):
        url = ''.join((self.files_url, 'file.txt'))

        httpretty.register_uri(httpretty.GET, url, status=201)
        data = {'filepath': 'file.txt'}
        response = self.cm.do_download(data)
        self.assertEqual(response, True)

    @httpretty.activate
    def test_download_file_not_exists(self):
        url = ''.join((self.files_url, 'file.tx'))

        httpretty.register_uri(httpretty.GET, url, status=404)
        data = {'filepath': 'file.tx'}
        response = self.cm.do_download(data)
        self.assertEqual(response, False)

    @httpretty.activate
    def test_do_upload_success(self):

        # prepare fake server
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        # call api
        response = self.cm.do_upload({'filepath': 'foo.txt'})
        self.assertEqual(response, recv_js)

    # actions:
    @httpretty.activate
    def test_do_move(self):
        url = ''.join((self.actions_url, 'move'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_move({'src': 'foo.txt', 'dst': 'folder/foo.txt'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_delete(self):
        url = ''.join((self.actions_url, 'delete'))

        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")
        d = {'filepath': 'foo.txt'}

        response = self.cm.do_delete(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_modify(self):
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.PUT, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_modify({'filepath': 'foo.txt'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_copy(self):
        url = ''.join([self.actions_url, 'copy'])
        d = {'src': 'foo.txt', 'dst': 'folder/foo.txt'}
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_copy(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_get_server_snapshot(self):
        url = self.files_url
        js = json.dumps({'files': 'foo.txt'})

        httpretty.register_uri(httpretty.GET, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_get_server_snapshot('')
        self.assertEqual(json.dumps(response), js)

    def tearDown(self):
        httpretty.disable()
        httpretty.reset()
        self.remove_fake_dir()

    def make_fake_dir(self):
        sharing_path = os.path.join(self.cfg['sharing_path'])

        if os.path.exists(sharing_path):
            shutil.rmtree(sharing_path)
        # recreate the directory so the fake file below can be written
        os.makedirs(sharing_path)

        fake_file = os.path.join(self.cfg['sharing_path'], 'foo.txt')
        with open(fake_file, 'w') as fc:
            fc.write('foo.txt :)')

    def remove_fake_dir(self):
        shutil.rmtree(os.path.join(self.cfg['sharing_path']))
Example #37
class DataLogger:

    """
    In this class the different systems are initialised:
        - logger and its handlers
        - local configuration
        - connection with the internet

    If there is no connection with the internet:
        - A timed job is created that controls internet connection.
        - Logging of data is started with local configuration.
        - If the internet connection comes back up and the online\
        configuration differs, the wrongly logged data will be removed.

    If there is connection with the internet and the server is working:
        - Check if the online configuration differs from the local one. If so,\
        the configuration will be updated.
        - Logging of data is started.
        - Sending of data is started.
        - A timed job is created that checks if the online configuration is\
        updated
        - The management of leds is started.
    """

    def __init__(self):
        try:
            # initiate logger
            self.logger = logging.getLogger()
            self.logger.setLevel(logging.DEBUG)
            self.log_send_store_handler = LogSendStoreHandler(LOG_LOCATION)
            formatter = logging.Formatter(
                '%(asctime)s - %(levelname)s - %(name)s - %(message)s')
            self.log_send_store_handler.setFormatter(formatter)
            self.logger.addHandler(self.log_send_store_handler)
            self.logger.info('Initialising system...')
            job_info_filter = JobInfoFilter()
            logging.getLogger('apscheduler.scheduler').addFilter(
                job_info_filter)
            logging.getLogger('apscheduler.threadpool').addFilter(
                job_info_filter)

            # load local configuration
            self.conf_man = ConfigurationManager(CONFIG_LOCATION)
            self.log_send_store_handler.update_configuration()

            self.scheduler = Scheduler()
            self.scheduler.start()

            self.packet_manager = PacketManager(self.scheduler)

            # initiate network connection
            self.connection = ConnectionManager()

            # add scheduler and connection to log handler
            self.log_send_store_handler.update_configuration(
                scheduler=self.scheduler,
                connection=self.connection)

            # try to connect
            connected_to_internet = self.connection.check_internet_connection()
            connected_to_server = self.connection.check_server_connection()
            if connected_to_internet and connected_to_server:
                self.load_online_configuration_and_initiate_sending_data()
                self.packet_manager.update_time()
                self.packet_manager.initiate_send_packets(self.connection)
            else:
                '''
                if there is no connection:
                    keep checking for a connection
                    temporarily use offline timer and modbus slave
                    configuration
                '''

                if connected_to_internet:
                    self.packet_manager.update_time()
                self.wait_for_connection_to_load_configuration()

            # initiate sensor timers
            self.read_sensor_scheduler = ReadSensorScheduler(
                self.scheduler,
                self.packet_manager)
            self.led_manager = LedManager(self.scheduler)
            self.led_manager.update_led(PinName.powered, LedState.on)
            self.set_up_led_manager_calls()

            # sleep briefly to initialise the led of the log handler
            sleep(1)
            self.logger.info('Initialisation complete')

            while True:
                sleep(10)
                self.logger.debug('Alive and kicking')
                if self.logger.level == logging.DEBUG:
                    scheduler_jobs = self.scheduler.get_jobs()
                    if len(scheduler_jobs) > 1:
                        self.logger.debug('Current scheduler jobs:')
                        for index, job in enumerate(scheduler_jobs):
                            self.logger.debug(' Job {0}: {1} {2}'.format(
                                index,
                                job.name,
                                job.next_run_time))
                    else:
                        self.logger.debug('No running scheduler jobs')

        except Exception as e:
            self.logger.error(e)
            self.log_send_store_handler.send_logs_job()
            raise

    def load_online_configuration_and_initiate_sending_data(self):
        # check online configuration
        try:
            online_checksum = self.connection.get_configuration_checksum()
            self.logger.info("Checking online configuration..")
            if self.conf_man.is_online_configuration_different(online_checksum):
                self.logger.info(
                    'Online configuration is new, updating configuration..')
                online_configuration = self.connection.get_configuration()
                self.conf_man.validate_json_configuration(online_configuration)
                self.conf_man.save_configuration_local(
                    online_checksum,
                    online_configuration)
                self.packet_manager.remove_all_packets_from_memory()

                # update systems that make use of the configuration
                self.log_send_store_handler.update_configuration(
                    scheduler=self.scheduler,
                    connection=self.connection)
                self.connection.update_configuration()
                try:
                    self.read_sensor_scheduler.update_configuration()
                except:
                    pass
                self.packet_manager.update_configuration()
        except:
            self.logger.warning('Problem updating configuration')
            raise
        try:  # try to remove job
            self.scheduler.unschedule_func(
                self.load_online_configuration_and_initiate_sending_data)
        except:
            pass

        # periodically check changes in configuration
        self.scheduler.add_interval_job(
            self.load_online_configuration_and_initiate_sending_data,
            seconds=configuration.get_time_interval_to_check_online_config())

        self.packet_manager.initiate_send_packets(self.connection)

    def wait_for_connection_to_load_configuration(self):
        if not self.connection.is_connected():
            # no internet connection, start job to check connection
            self.scheduler.add_interval_job(self.try_to_connect_to_internet,
                                            seconds=CHECK_CONNECTION_INTERVAL)
        else:
            self.packet_manager.update_time()
            if not self.connection.check_server_connection():
                # no connection with server, start job to check connection
                self.scheduler.add_interval_job(
                    self.try_to_load_online_configuration,
                    seconds=CHECK_CONNECTION_INTERVAL)

    def try_to_connect_to_internet(self):
        if self.connection.check_internet_connection():
            self.scheduler.unschedule_func(self.try_to_connect_to_internet)

            self.packet_manager.update_time()
            if not self.connection.check_server_connection():
                # no connection with server, start job to check connection
                self.scheduler.add_interval_job(
                    self.try_to_load_online_configuration,
                    seconds=CHECK_CONNECTION_INTERVAL)
            else:
                self.load_online_configuration_and_initiate_sending_data()

    def try_to_load_online_configuration(self):
        if self.connection.check_server_connection():
            self.load_online_configuration_and_initiate_sending_data()

            self.scheduler.unschedule_func(
                self.try_to_load_online_configuration)

    def set_up_led_manager_calls(self):
        sensor_led_call = LedCall(self.led_manager, PinName.readingsensor)
        connected_led_call = LedCall(self.led_manager, PinName.connected)
        logging_led_call = LedCall(self.led_manager, PinName.logging)

        self.read_sensor_scheduler.set_led_call(sensor_led_call)
        self.connection.set_led_call(connected_led_call)
        self.log_send_store_handler.set_led_call(logging_led_call)
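
Because __init__ ends in the keep-alive loop, constructing the class is effectively the program's entry point; a minimal sketch, assuming the module is run as a script.

# Hypothetical entry point: DataLogger() blocks in its internal
# 'Alive and kicking' loop until an exception is raised.
if __name__ == '__main__':
    DataLogger()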
class TestConnectionManager(unittest.TestCase):

    def setUp(self):
        httpretty.enable()
        create_environment()
        with open(CONFIG_FILEPATH, 'r') as fo:
            self.cfg = json.load(fo)

        self.auth = (self.cfg['user'], self.cfg['pass'])
        # override
        self.cfg['server_address'] = "http://www.pyboxtest.com"
        self.cfg['sharing_path'] = os.path.join(os.getcwd(), "sharing_folder")

        # auth server address used for testing
        self.authServerAddress = "http://" + self.cfg['user'] + ":" + self.cfg['pass'] + "@www.pyboxtest.com"
        # example of self.base_url = 'http://localhost:5000/API/V1/'
        self.base_url = ''.join([self.cfg['server_address'], self.cfg['api_suffix']])
        self.files_url = ''.join([self.base_url, 'files/'])
        self.actions_url = ''.join([self.base_url, 'actions/'])
        self.shares_url = ''.join([self.base_url, 'shares/'])
        self.user_url = ''.join([self.base_url, 'users/'])

        self.cm = ConnectionManager(self.cfg)
        self.make_fake_dir()

    @httpretty.activate
    def test_register_user(self):
        """
        Test register user api:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        content = 'user activated'
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=200, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertEqual(response['content'], content)

    @httpretty.activate
    def test_register_user_with_weak_password(self):
        """
        Test register user api with weak password:
        method = POST
        resource = <user>
        data = password=<password>
        """
        weak_password = '******'
        data = (USR, weak_password)
        url = ''.join((self.user_url, USR))
        content = {'type_of_improvement': 'improvement suggested'}
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=403, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('improvements', response)
        self.assertEqual(response['improvements'], content)

    @httpretty.activate
    def test_register_user_with_already_existent_user(self):
        """
        Test register user api with already existent user:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        # This is the only case where the server doesn't send data with the error message
        httpretty.register_uri(httpretty.POST, url, status=409)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertIsInstance(response['content'], str)

    @httpretty.activate
    def test_fail_to_register_user(self):
        """
        Test failed register request
        Test activate user api:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        httpretty.register_uri(httpretty.POST, url, status=500)

        response = self.cm.do_register(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_activate_user(self):
        """
        Test successful activation
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = '6c9fb345c317ad1d31ab9d6445d1a820'
        data = (user, token)
        url = ''.join((self.user_url, user))
        answer = 'user activated'
        answer_jsoned = json.dumps(answer)
        httpretty.register_uri(httpretty.PUT, url, status=201, body=answer_jsoned)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], unicode)
        self.assertTrue(response['successful'])

    @httpretty.activate
    def test_activate_user_already_existent(self):
        """
        Test activate user already existent
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=409)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_activate_user_not_existent(self):
        """
        Test activate user not existent
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=404)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_fail_to_activate_user(self):
        """
        Test failed activation request
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=500)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_post_recover_password_not_found(self):
        """
        Test that if /users/<email>/reset POST == 404 then cm return None
        """
        # An unknown user (neither registered nor pending) is a resource not found for the server...
        email = '*****@*****.**'
        url = self.user_url + email + '/reset'
        # ...so the server should return a 404:
        httpretty.register_uri(httpretty.POST, url, status=404)
        # and the command manager must return None in this case
        response = self.cm.do_reqrecoverpass(email)
        self.assertIsNone(response)

    @httpretty.activate
    def test_post_recover_password_accept(self):
        """
        Test that if /users/<email>/reset POST == 202 then cm return True
        """
        email = '*****@*****.**'
        url = self.user_url + email + '/reset'
        httpretty.register_uri(httpretty.POST, url, status=202)
        response = self.cm.do_reqrecoverpass(email)
        self.assertTrue(response)

    @httpretty.activate
    def test_put_recover_password_not_found(self):
        """
        Test that if /users/<email> PUT == 404 then cm return None
        """
        email = '*****@*****.**'
        recoverpass_code = os.urandom(16).encode('hex')
        new_password = '******'
        url = self.user_url + email
        httpretty.register_uri(httpretty.PUT, url, status=404)
        data = email, recoverpass_code, new_password
        response = self.cm.do_recoverpass(data)
        self.assertFalse(response)

    @httpretty.activate
    def test_put_recover_password_ok(self):
        """
        Test that if /users/<email> PUT == 200 then cm return True
        """
        email = '*****@*****.**'
        recoverpass_code = os.urandom(16).encode('hex')
        new_password = '******'
        url = self.user_url + email
        httpretty.register_uri(httpretty.PUT, url, status=200)
        data = email, recoverpass_code, new_password
        response = self.cm.do_recoverpass(data)
        self.assertTrue(response)

    # files:
    @httpretty.activate
    def test_download_normal_file(self):
        url = ''.join((self.files_url, 'file.txt'))

        httpretty.register_uri(httpretty.GET, url, status=201)
        data = {'filepath': 'file.txt'}
        response = self.cm.do_download(data)
        self.assertEqual(response, True)

    @httpretty.activate
    def test_download_file_not_exists(self):
        url = ''.join((self.files_url, 'file.tx'))

        httpretty.register_uri(httpretty.GET, url, status=404)
        data = {'filepath': 'file.tx'}
        response = self.cm.do_download(data)
        self.assertEqual(response, False)

    @httpretty.activate
    def test_do_upload_success(self):

        # prepare fake server
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        # call api
        response = self.cm.do_upload({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertEqual(response, recv_js)

    # actions:
    @httpretty.activate
    def test_do_move(self):
        url = ''.join((self.actions_url, 'move'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_move({'src': 'foo.txt', 'dst': 'folder/foo.txt'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_delete(self):
        url = ''.join((self.actions_url, 'delete'))

        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")
        d = {'filepath': 'foo.txt'}

        response = self.cm.do_delete(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_modify(self):
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.PUT, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_modify({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_copy(self):
        url = ''.join([self.actions_url, 'copy'])
        d = {'src': 'foo.txt', 'dst': 'folder/foo.txt'}
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_copy(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_get_server_snapshot(self):
        url = self.files_url
        js = json.dumps({'files': 'foo.txt'})

        httpretty.register_uri(httpretty.GET, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_get_server_snapshot('')
        self.assertEqual(json.dumps(response), js)

    def tearDown(self):
        httpretty.disable()
        httpretty.reset()
        self.remove_fake_dir()

    def make_fake_dir(self):
        sharing_path = os.path.join(self.cfg['sharing_path'])

        if os.path.exists(sharing_path):
            shutil.rmtree(sharing_path)
        # recreate the directory so the fake file below can be written
        os.makedirs(sharing_path)

        fake_file = os.path.join(self.cfg['sharing_path'], 'foo.txt')
        with open(fake_file, 'w') as fc:
            fc.write('foo.txt :)')

    def remove_fake_dir(self):
        shutil.rmtree(os.path.join(self.cfg['sharing_path']))
class TestConnectionManager(unittest.TestCase):
    CONFIG_DIR = os.path.join(os.environ['HOME'], '.PyBox')
    CONFIG_FILEPATH = os.path.join(CONFIG_DIR, 'daemon_config')
    LOCAL_DIR_STATE_PATH = os.path.join(CONFIG_DIR, 'dir_state')

    def setUp(self):
        httpretty.enable()

        with open(TestConnectionManager.CONFIG_FILEPATH, 'r') as fo:
            self.cfg = json.load(fo)

        self.auth = (self.cfg['user'], self.cfg['pass'])
        # override
        self.cfg['server_address'] = "http://www.pyboxtest.com"
        self.cfg['sharing_path'] = os.path.join(os.getcwd(), "sharing_folder")

        # create the auth used for testing
        self.authServerAddress = "http://" + self.cfg['user'] + ":" + self.cfg['pass'] + "@www.pyboxtest.com"
        # example of self.base_url = 'http://localhost:5000/API/V1/'
        self.base_url = ''.join([self.cfg['server_address'], self.cfg['api_suffix']])
        self.files_url = ''.join([self.base_url, 'files/'])
        self.actions_url = ''.join([self.base_url, 'actions/'])
        self.shares_url = ''.join([self.base_url, 'shares/'])
        self.user_url = ''.join([self.base_url, 'users/'])

        self.cm = ConnectionManager(self.cfg)
        self.make_fake_dir()

    @httpretty.activate
    def test_register_user(self):
        """
        Test register user api:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        content = 'user activated'
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=200, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertEqual(response['content'], content)

    @httpretty.activate
    def test_register_user_with_weak_password(self):
        """
        Test register user api with weak password:
        method = POST
        resource = <user>
        data = password=<password>
        """
        weak_password = '******'
        data = (USR, weak_password)
        url = ''.join((self.user_url, USR))
        content = {'type_of_improvement': 'improvement suggested'}
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=403, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('improvements', response)
        self.assertEqual(response['improvements'], content)

    @httpretty.activate
    def test_register_user_with_already_existent_user(self):
        """
        Test register user api with already existent user:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        # This is the only case where the server doesn't send data with the error message
        httpretty.register_uri(httpretty.POST, url, status=409)
        response = self.cm.do_register(data)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertIsInstance(response['content'], str)

    @httpretty.activate
    def test_activate_user(self):
        """
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = '6c9fb345c317ad1d31ab9d6445d1a820'
        data = (user, token)
        url = ''.join((self.user_url, user))

        httpretty.register_uri(httpretty.PUT, url, status=201, body='user activated')
        response = self.cm.do_activate(data)
        self.assertNotEqual(response, False)
        self.assertIsInstance(response, unicode)

        httpretty.register_uri(httpretty.PUT, url, status=404)
        self.assertFalse(self.cm.do_activate(data))

        httpretty.register_uri(httpretty.PUT, url, status=409)
        self.assertFalse(self.cm.do_activate(data))

    # files:
    @httpretty.activate
    def test_download_normal_file(self):
        url = ''.join((self.files_url, 'file.txt'))

        httpretty.register_uri(httpretty.GET, url, status=201)
        data = {'filepath': 'file.txt'}
        response = self.cm.do_download(data)
        self.assertEqual(response, True)

    @httpretty.activate
    def test_download_file_not_exists(self):
        url = ''.join((self.files_url, 'file.tx'))

        httpretty.register_uri(httpretty.GET, url, status=404)
        data = {'filepath': 'file.tx'}
        response = self.cm.do_download(data)
        self.assertEqual(response, False)

    @httpretty.activate
    def test_do_upload_success(self):

        # prepare fake server
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        # call api
        response = self.cm.do_upload({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertEqual(response, recv_js)

    # actions:
    @httpretty.activate
    def test_do_move(self):
        url = ''.join((self.actions_url, 'move'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_move({'src': 'foo.txt', 'dst': 'folder/foo.txt'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_delete(self):
        url = ''.join((self.actions_url, 'delete'))

        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")
        d = {'filepath': 'foo.txt'}

        response = self.cm.do_delete(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_modify(self):
        url = ''.join((self.files_url, 'foo.txt'))
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.PUT, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_modify({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_do_copy(self):
        url = ''.join([self.actions_url, 'copy'])
        d = {'src': 'foo.txt', 'dst': 'folder/foo.txt'}
        js = json.dumps({"server_timestamp": time.time()})
        recv_js = json.loads(js)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_copy(d)
        self.assertEqual(response, recv_js)

    @httpretty.activate
    def test_get_server_snapshot(self):
        url = self.files_url
        js = json.dumps({'files': 'foo.txt'})

        httpretty.register_uri(httpretty.GET, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_get_server_snapshot('')
        self.assertEqual(json.dumps(response), js)

    def tearDown(self):
        httpretty.disable()
        httpretty.reset()
        self.remove_fake_dir()

    def make_fake_dir(self):
        sharing_path = os.path.join(self.cfg['sharing_path'])

        # always start from a fresh sharing folder
        if os.path.exists(sharing_path):
            shutil.rmtree(sharing_path)
        os.makedirs(sharing_path)

        fake_file = os.path.join(self.cfg['sharing_path'], 'foo.txt')
        with open(fake_file, 'w') as fc:
            fc.write('foo.txt :)')

    def remove_fake_dir(self):
        shutil.rmtree(os.path.join(self.cfg['sharing_path']))
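# A conventional entry point for running the test case above with the stock
# unittest runner; this block is a sketch, not part of the original module.
if __name__ == '__main__':
    unittest.main()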
def sahibinden_scraper(num_page):
    """
    Web scraper for sahibinden.com
    param num_page: number of page that wanted to be scraped
    """
    cm = ConnectionManager()
    strt = time.time()  # overall timer for the whole scrape
    data_ids = []
    seller_names = []
    seller_links = []
    ad_titles = []
    ad_links = []
    prices = []
    counties = []
    districts = []
    other_items = []  # container list for attributes like date, area, room, floor within building, etc.

    diff = num_page - 2
    last = ((diff * 20) + 20) + 1

    for t in range(0, last, 20):
        url = 'https://www.sahibinden.com/kiralik-daire/antalya?pagingOffset=' + str(
            t)
        r = cm.request(url)
        status_code = r.code if r != '' else -1  # Status code of request.
        if status_code == 200:
            so = bs4.BeautifulSoup(r.read(), 'lxml')
            table = so.find('table', id='searchResultsTable')

            tracker = 0
            requests = 0
            for i in table.findAll('tr'):

                # a row without a data-id is not a home ad (maybe just a google ad)
                if i.get('data-id') is None:
                    continue  # ignore these data points

                else:  # if data points are home ads
                    ids = i.get('data-id')
                    names_seller = [
                        _.get('title') for _ in (i.find_all(
                            'a', attrs={'class': 'titleIcon store-icon'}))
                    ]
                    links_seller = [
                        _.get('href') for _ in (i.find_all(
                            'a', attrs={'class': 'titleIcon store-icon'}))
                    ]
                    titles_ad = [
                        ' '.join(_.text.split())
                        for _ in i.find_all('a',
                                            attrs={'class': 'classifiedTitle'})
                    ]
                    links_ad = [
                        'https://www.sahibinden.com' + _.get('href')
                        for _ in i.find_all('a',
                                            attrs={'class': 'classifiedTitle'})
                    ]

                    data_ids.append(ids)
                    seller_names.append(names_seller)
                    seller_links.append(links_seller)
                    ad_titles.append(titles_ad)
                    ad_links.append(links_ad)

                    # finding prices corresponding to ads
                    for z in i.find_all(
                            'td', attrs={'class': 'searchResultsPriceValue'}):
                        price = ' '.join(z.text.split())

                        if '.' in price:  # if price is like 7.100

                            # convert it to 7100 and keep it in 'prices' list
                            prices.append(price.replace('.', ''))

                        else:  # if it is not, just put it to the 'prices' list
                            prices.append(price)

                    # finding addresses corresponding to ads
                    for f in i.findAll(
                            'td',
                            attrs={'class': 'searchResultsLocationValue'}):
                        # name of county and district in Turkish
                        turkish_adress_name = ''.join(f.text.split())

                        # converting Turkish characters to English characters
                        normalized = unicodedata.normalize(
                            'NFD', turkish_adress_name)
                        # converted_adress_name consists of county and district names.
                        converted_adress_name = u"".join([
                            c for c in normalized
                            if not unicodedata.combining(c)
                        ])

                        # first_letters is a list of the first letters of the county and district names. For example: ['M', 'F']
                        first_letters = re.findall('[A-Z]+',
                                                   converted_adress_name)

                        # if district name is not specified, then 'first_letters' will contain only one letter, like ['A']
                        try:
                            # first_letters[1] is a district name
                            # first_letters[0] is a county name
                            districts.append(converted_adress_name[
                                converted_adress_name.
                                find(first_letters[1], 1):])
                            counties.append(converted_adress_name[
                                converted_adress_name.
                                find(first_letters[0], 0):converted_adress_name
                                .find(first_letters[1], 1)])

                        except IndexError:
                            # occurs when first_letters includes only one letter:
                            # if the district is not specified, put the county name in both the district and county containers.
                            counties.append(
                                converted_adress_name[converted_adress_name.
                                                      find(first_letters[0]):])
                            districts.append(
                                converted_adress_name[converted_adress_name.
                                                      find(first_letters[0]):])

                    # finding other data points like date of ad, number of rooms, total area, floor within building etc.
                    # to do this, iterate through all of the advertisement links
                    for y in links_ad:
                        start_time = time.time()
                        ur = cm.request(y)
                        tracker += 1
                        time.sleep(random.randint(2, 6))
                        requests += 1
                        elapsed_time = time.time() - start_time
                        print(
                            'Request:{}; Frequency: {} requests/s; elapsed time:{}'
                            .format(requests, requests / elapsed_time,
                                    elapsed_time))

                        soup = bs4.BeautifulSoup(ur.read(), 'lxml')
                        # attributes of the ad
                        info = soup.find('div', attrs={'class': 'classifiedInfo'})

                        dct = {}  # this will contain column names as keys, and data points as values.
                        for v in info.findAll('ul'):
                            # column names, e.g. the 'number_of_rooms' column
                            head = v.find_all('strong')
                            # values corresponding to the above columns, e.g. (3+1)
                            attribute = v.find_all('span')

                            for a, b in zip(head, attribute):
                                columns = ' '.join(a.text.split())
                                data = ' '.join(b.text.split())
                                dct[columns] = data

                        print(tracker)
                        if tracker % 5 == 0:
                            # after every 5 requests, change identity in order to avoid being blocked
                            cm.new_identity()

                        other_items.append(pd.DataFrame(data=dct, index=[0]))
            print('done')

        else:  # If status code of request is not 200.
            break

    print("--- %s seconds ---" % (time.time() - strt))
    return other_items, data_ids, ad_titles, ad_links, seller_names, seller_links, prices, counties, districts
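# A hypothetical driver for sahibinden_scraper; the page count, column names
# and output paths below are assumptions, not part of the original source,
# and each listing-level list is expected to hold one entry per ad.
if __name__ == '__main__':
    import pandas as pd

    (other_items, data_ids, ad_titles, ad_links, seller_names,
     seller_links, prices, counties, districts) = sahibinden_scraper(2)

    # attribute frames scraped from the ad detail pages
    attributes = pd.concat(other_items, ignore_index=True) if other_items else pd.DataFrame()
    attributes.to_csv('sahibinden_antalya_attributes.csv', index=False)

    # listing-level columns scraped from the search results table
    listings = pd.DataFrame({
        'data_id': data_ids,
        'title': ad_titles,
        'link': ad_links,
        'price': prices,
        'county': counties,
        'district': districts,
    })
    listings.to_csv('sahibinden_antalya.csv', index=False)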
    def __init__(self, connection_class=GaeConnection, connection_params=None, touch_time=10000):
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        # Reduces number of writes to 'last_active' field.
        self.touch_time = touch_time
Example #42
class LoadIfileManager:
    WORK_MEM = 10240

    def __init__(self, connectionStr):
        self.connMgr = ConnectionManager()
        self.conn = self.connMgr.connectToDatabase(connectionStr)
        self.cur = self.conn.cursor()
        self.fdm = ForeignDataManager()
        self.cur.execute("set work_mem to %s", (self.WORK_MEM, ))
        #print("Load IFile Manager Initialized.")

    def dropForeignTable(self, fdwTableName):
        self.cur.execute("drop foreign table if exists " + fdwTableName + ";")
        #print("drop foreign table if exists "+fdwTableName+";")

    def createForeignTable(self, iFile, fTableName):
        header, fdwScript = self.fdm.generateFDWScript(iFile, fTableName)
        self.cur.execute(fdwScript)
        return header

    def createFileWithoutDuplicatesV1(self, outputFilePath, noDupsSql):
        copyStmt = "copy (" + noDupsSql + ") to '" + outputFilePath + "' with delimiter E'\\t'" + " csv header;"
        #print("copyStmt = "+copyStmt)
        self.cur.execute(copyStmt)

    def createFileWithoutDuplicates(self, outputFilePath, noDupsSql):
        copyStmt = "copy (" + noDupsSql + ") to STDOUT with delimiter E'\\t'" + " csv header;"
        with open(outputFilePath, 'w') as outputFile:
            # 20 KB buffer for copy_expert (its size argument is in bytes); the default is 8 KB
            self.cur.copy_expert(copyStmt, outputFile, 20480)

    def loadDataV1(self, tableName, header, fileToLoad, primaryKeyColumnName):
        loadSql = "copy " + tableName + " (" + (
            ",".join(header)
        ) + ")" + " from '" + fileToLoad + "' with delimiter E'\\t' csv header;"
        #print("loadSql = "+loadSql)
        self.cur.execute(loadSql)
        self.updateSerialSequence(tableName, primaryKeyColumnName)

    def loadData(self, tableName, header, fileToLoad, primaryKeyColumnName):
        loadSql = "copy " + tableName + " (" + (",".join(
            header)) + ")" + " from STDIN with delimiter E'\\t' csv header;"
        rowsLoaded = 0
        with open(fileToLoad, 'r') as f:
            self.cur.copy_expert(loadSql, f, 20480)
            rowsLoaded = self.cur.rowcount
            #print("Rows loaded = %s" % self.cur.rowcount)
        self.updateSerialSequence(tableName, primaryKeyColumnName)
        return rowsLoaded

    def upsertKVPFromForeignTable(self, fTableName, sourceKey, sourceValue,
                                  targetTable, targetId, targetJsonb):
        kvpSql = "select * from upsertKVPFromForeignTable('" + fTableName.lower(
        ) + "', '" + sourceKey + "', '" + sourceValue + "', '" + targetTable + "', '" + targetId + "', '" + targetJsonb + "');"
        #print ("kvpSQL: %s" % kvpSql)
        self.cur.execute(kvpSql)
        rowsLoaded = self.cur.fetchone()
        if rowsLoaded is not None:
            return rowsLoaded[0]
        else:
            return rowsLoaded

    def updateSerialSequence(self, tableName, primaryKeyColumnName):
        updateSeqSql = "SELECT pg_catalog.setval(pg_get_serial_sequence('" + tableName + "', '" + primaryKeyColumnName + "'), MAX(" + primaryKeyColumnName + ")) FROM " + tableName + ";"
        #print("updateSeqSql = "+updateSeqSql)
        self.cur.execute(updateSeqSql)

    def commitTransaction(self):
        self.conn.commit()

    def rollbackTransaction(self):
        self.conn.rollback()

    def closeConnection(self):
        self.connMgr.disconnectFromDatabase()
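# A sketch of driving LoadIfileManager end to end; the connection string,
# file paths and table/column names are placeholders, not taken from the
# original source.
if __name__ == '__main__':
    loader = LoadIfileManager("host=localhost dbname=gobii user=gobii")
    try:
        # stage the intermediate file behind a foreign table, dedupe it,
        # then bulk-load the result and fix up the serial sequence
        header = loader.createForeignTable("/tmp/marker.ifile", "fdw_marker")
        loader.createFileWithoutDuplicates(
            "/tmp/marker.nodups", "select distinct * from fdw_marker")
        rows = loader.loadData("marker", header, "/tmp/marker.nodups", "marker_id")
        loader.dropForeignTable("fdw_marker")
        loader.commitTransaction()
        print("Rows loaded = %s" % rows)
    except Exception:
        loader.rollbackTransaction()
        raise
    finally:
        loader.closeConnection()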
Example #43
    def __init__(self):
        try:
            # initiate logger
            self.logger = logging.getLogger()
            self.logger.setLevel(logging.DEBUG)
            self.log_send_store_handler = LogSendStoreHandler(LOG_LOCATION)
            formatter = logging.Formatter(
                '%(asctime)s - %(levelname)s - %(name)s - %(message)s')
            self.log_send_store_handler.setFormatter(formatter)
            self.logger.addHandler(self.log_send_store_handler)
            self.logger.info('Initialising system...')
            job_info_filter = JobInfoFilter()
            logging.getLogger('apscheduler.scheduler').addFilter(
                job_info_filter)
            logging.getLogger('apscheduler.threadpool').addFilter(
                job_info_filter)

            # load local configuration
            self.conf_man = ConfigurationManager(CONFIG_LOCATION)
            self.log_send_store_handler.update_configuration()

            self.scheduler = Scheduler()
            self.scheduler.start()

            self.packet_manager = PacketManager(self.scheduler)

            # initiate network connection
            self.connection = ConnectionManager()

            # add scheduler and connection to log handler
            self.log_send_store_handler.update_configuration(
                scheduler=self.scheduler,
                connection=self.connection)

            # try to connect
            connected_to_internet = self.connection.check_internet_connection()
            connected_to_server = self.connection.check_server_connection()
            if connected_to_internet and connected_to_server:
                self.load_online_configuration_and_initiate_sending_data()
                self.packet_manager.update_time()
                self.packet_manager.initiate_send_packets(self.connection)
            else:
                '''
                if there is no connection:
                    keep checking for a connection
                    temporarily use offline timer and modbus slave
                    configuration
                '''

                if connected_to_internet:
                    self.packet_manager.update_time()
                self.wait_for_connection_to_load_configuration()

            # initiate sensor timers
            self.read_sensor_scheduler = ReadSensorScheduler(
                self.scheduler,
                self.packet_manager)
            self.led_manager = LedManager(self.scheduler)
            self.led_manager.update_led(PinName.powered, LedState.on)
            self.set_up_led_manager_calls()

            # short sleep to initialise the led of the log handler
            sleep(1)
            self.logger.info('Initialisation complete')

            while True:
                sleep(10)
                self.logger.debug('Alive and kicking')
                if self.logger.level == logging.DEBUG:
                    scheduler_jobs = self.scheduler.get_jobs()
                    if len(scheduler_jobs) > 1:
                        self.logger.debug('Current scheduler jobs:')
                        for index, job in enumerate(scheduler_jobs):
                            self.logger.debug(' Job {0}: {1} {2}'.format(
                                index,
                                job.name,
                                job.next_run_time))
                    else:
                        self.logger.debug('No running scheduler jobs')

        except Exception as e:
            self.logger.error(e)
            # send the collected logs before re-raising (this line was
            # unreachable after the raise in the original)
            self.log_send_store_handler.send_logs_job()
            raise
Example #44
class Backdoor:
    def stop(self, signal=None, frame=None):
        if signal:
            self.logger.info('Caught SIGINT.')
        self.logger.info('Shutting down.')
        self.connection_manager.stop()
        self.running = False

    def __init__(self):
        self.logger = logging.getLogger('backdoor')
        self.logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)
        self.logger.info('Starting backdoor.')
        signal.signal(signal.SIGINT, self.stop)
        self.running = True
        self.connection_manager = ConnectionManager(config.api_host, config.api_port)
        self.connection_manager.start()

    def run(self):
        while self.running:
            self.update()
        self.logger.info('Backdoor was shut down.')

    def update(self):
        try:
            query = self.connection_manager.queries.get(block=False)
            self.logger.debug('Got query:')
            self.logger.debug(query.query)
            if query.token in self.connection_manager.devices or query.token in self.connection_manager.webuis:
                self.handle_query(query)
            else:
                self.logger.info('A request from a non-registered device was discarded:')
                self.logger.info(query.query)
        except queue.Empty:
            pass
        except Exception as e:
            self.logger.exception('Caught exception during the update process:')
            self.logger.exception(e)

    def issue_query(self, device, query):
        device = device if isinstance(device, str) else device.pubkey
        if device in self.connection_manager.devices:
            self.connection_manager.devices[device].queries.put(query)
        elif device in self.connection_manager.webuis:
            self.connection_manager.webuis[device].queries.put(query)
        else:
            self.logger.info('Device or webui with token %s is not registered. Request was discarded:' % device)
            self.logger.info(query.query)

    def open(self, device):
        query = Query()
        query.create_open(config.server_token)
        self.issue_query(device, query)

    @helpers.handle_dbsession()
    def handle_query(session, self, query):
        response = Query()
        self.logger.debug('Handle query:')
        self.logger.debug(query.query)
        if query.method == 'ACCESS':
            token = session.query(Token).filter_by(value=query.params[0]).first()
            device = session.query(Device).filter_by(pubkey_device=query.token).first()
            if len(query.params) == 1:
                if token in device.tokens and token.expiry_date >= helpers.today():
                    response.create_grant(config.server_token, query.params[0])
                    self.logger.info('Granted access to token %s at device %s' % (query.params[0], query.token))
                else:
                    response.create_deny(config.server_token, query.params[0])
                    self.logger.info('Denied access to token %s at device %s' % (query.params[0], query.token))

                self.issue_query(query.token, response)
            else:
                self.logger.debug('Broken query. Expected exactly 1 parameter.')

        elif query.method == 'FLASH':
            self.logger.info('Requested flash of token %s at device %s' % (query.params[0], query.params[1]))
            if len(query.params) == 2:
                if query.token in self.connection_manager.webuis:
                    response.create_flash(config.server_token, query.params[0])
                    self.issue_query(query.params[1], response)
                else:
                    self.logger.info('Requested flash came from a non webui or an unregistered one. It was discarded.')
            else:
                self.logger.debug('Broken query. Expected exactly 2 parameters.')

        elif query.method == 'FLASHED':
            if len(query.params) == 1:
                session.query(Token).filter_by(value=query.params[0]).first().flashed = True
                self.logger.debug('Token %s was flashed' % query.params[0])
            else:
                self.logger.debug('Broken query. Expected exactly 1 parameter.')

        elif query.method == 'OPEN':
            if len(query.params) == 1:
                if query.token in self.connection_manager.webuis:
                    response.create_open(config.server_token)
                    self.issue_query(query.params[0], response)
                    self.logger.debug('Sent OPEN to device with token %s.' % query.params[0])
                else:
                    self.logger.info('Requested open came from a non webui or an unregistered one. It was discarded.')
            else:
                self.logger.debug('Broken query. Expected exactly 1 parameter.')
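# A minimal launcher sketch for the Backdoor service above; it is an
# assumption, not part of the original source, and relies on the config
# module the class already imports.
if __name__ == '__main__':
    backdoor = Backdoor()  # __init__ wires SIGINT to Backdoor.stop()
    backdoor.run()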
Example #45
    def __init__(self):
        self.connection_manager = ConnectionManager()
        env_location = pkg_resources.resource_filename('resources', '.env')
        if os.environ.get('DOTENV_LOADED', '0') != '1':
            load_dotenv(env_location)
        self.personal = os.environ.get("BLAKE2D_KEY", "topsecretkey").encode()
Example #46
import json
import os.path
import time

import sanic
from sanic.log import logger
from connection_manager import ConnectionManager

log_params = sanic.log.LOGGING_CONFIG_DEFAULTS
fname = time.asctime().replace(' ', '_').replace(':', '') + '.log'
for h in log_params['handlers']:
    if 'stream' in log_params['handlers'][h]:
        del log_params['handlers'][h]['stream']
        log_params['handlers'][h]['class'] = 'logging.FileHandler'
    log_params['handlers'][h]['filename'] = os.path.join(
        '..', 'server_logs', fname)

CM = ConnectionManager()
kv_server = sanic.Sanic(log_config=log_params)


async def check_exist(kid):
    conn = await CM.get_connection()
    retval = (1, sanic.response.HTTPResponse(status=404))

    try:
        logger.info(f'Searching for pair, key: "{kid}"')
        res = await conn.select('kv', key=[kid], index='primary')
        if len(res) != 0:
            logger.info(
                f'Found pair, key: "{kid}", value: "{res[0]["value"]}"')
            retval = (0, sanic.response.json(res[0]['value']))
        else:
            # assumed tail (the original snippet is truncated here): log the
            # miss and keep the 404 default
            logger.info(f'Pair not found, key: "{kid}"')
    except Exception as e:
        # on lookup failure, keep the 404 default as well
        logger.error(f'Lookup for key "{kid}" failed: {e}')

    return retval
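# A hypothetical route exposing check_exist through the kv_server instance
# above; the URL pattern and handler name are assumptions.
@kv_server.route('/kv/<kid>', methods=['GET'])
async def get_pair(request, kid):
    # check_exist returns (status_flag, response); only the response is
    # needed here
    _, response = await check_exist(kid)
    return response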
def hurriyetemlak_scraper(page_num):
    cm = ConnectionManager()  # instance of the connection manager, used for changing identity
    strt_time = time.time()  # overall timer for the whole scrape
    data = []  # list that contains all of the home attributes like number of rooms, building age, net area etc.
    last = page_num + 1
    page_tracker = 0

    for t in range(0, last):

        url = 'https://www.hurriyetemlak.com/antalya-kiralik/daire?page={}'.format(
            t)
        rr = cm.request(url)
        # status code of request.
        status_code = rr.code if rr != '' else -1

        if status_code == 200:
            soup = bs4.BeautifulSoup(rr.read(), 'lxml')
            page_tracker += 1
            #         links = []
            #         for i in soup.findAll('div', attrs={'class':'list-item timeshare clearfix'}):
            #             attrs = i.find('a')
            #             links.append('https://www.hurriyetemlak.com'+attrs.get('href'))

            # the commented code above is the first version of the script for getting home advertisement links;
            # this code is a map version of it, kept for speed.
            links = list(
                map(
                    lambda x: 'https://www.hurriyetemlak.com' + x.find('a').
                    get('href'),
                    soup.findAll(
                        'div', attrs={'class':
                                      'list-item timeshare clearfix'})))

        else:
            print('Request failed.')
            break

        tracker = 0
        requests = 0  # to track the number of requests successfully sent to the website.
        for _ in links:
            start_time = time.time()
            r = cm.request(_)
            tracker += 1
            # in order to avoid overloading website, wait between 2 and 7 seconds for each iteration.
            time.sleep(random.randint(2, 7))
            requests += 1
            elapsed_time = time.time() - start_time
            print(
                'Request:{}; Frequency: {} requests/s; elapsed time:{}'.format(
                    requests, requests / elapsed_time, elapsed_time))

            stat_code = r.code if r != '' else -1  # status code of request.
            if stat_code == 200:
                so = bs4.BeautifulSoup(r.read(), 'lxml')
                # lists of attributes of home ads, still including html tags
                raw_items = [
                    b.find_all('span')
                    for f in so.findAll('li', attrs={'class': 'info-line'})
                    for b in f.findAll('li')
                ]
            else:
                print('Request failed')
                break

            print(tracker)
            if tracker % 5 == 0:
                cm.new_identity()

            revised_items = []
            for item in raw_items:
                revised_items.append(item)
                # stop once the trailing empty list is reached
                if str(item) == '[]':
                    break

            dct = {}  # dictionary with column names as keys and their data points as values

            # the last element of revised_items is just an empty list; ignore it
            for i in revised_items[:-1]:
                try:
                    # names of the attributes for this particular home ad
                    col = str(i[0]).split('>')[1].split('<')[0]
                    # values of the above attributes
                    row = str(i[1]).split('>')[1].split('<')[0]
                    dct[col] = row

                    # id_col and id_row are the id of this particular home ad
                    id_col = so.find('li', attrs={
                        'class': 'realty-numb'
                    }).text.replace('\n', '', 3).split(':')[0]
                    id_row = so.find('li', attrs={
                        'class': 'realty-numb'
                    }).text.replace('\n', '', 3).split(':')[1]
                    dct[id_col] = id_row
                    # title of particular home ad.
                    title = so.find('h1', attrs={
                        'class': 'details-header'
                    }).text
                    dct['Title'] = title
                    # price of particular home ad.
                    price = so.find('li',
                                    attrs={
                                        'class': 'price-line clearfix'
                                    }).text.replace('\n', '', 3)
                    dct['price'] = price
                except Exception:
                    continue
            # list of pandas dataframes.
            data.append(pd.DataFrame(dct, index=[0]))

            delays = [0.5, 1, 1.5, 2, 2.5, 3]
            time.sleep(np.random.choice(delays))  # to avoid overloading the website

        print("page {} is done".format(page_tracker))

    print(time.time() - strt_time)
    return data
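# As above, a hypothetical driver for hurriyetemlak_scraper; the page count
# and output path are assumptions, not part of the original source.
if __name__ == '__main__':
    import pandas as pd

    frames = hurriyetemlak_scraper(3)  # scrape result pages 0..3
    if frames:
        ads = pd.concat(frames, ignore_index=True, sort=False)
        ads.to_csv('hurriyetemlak_antalya.csv', index=False)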
Example #48
    def __init__(self, connectionStr):
        self.connMgr = ConnectionManager()
        self.conn = self.connMgr.connectToDatabase(connectionStr)
        self.cur = self.conn.cursor()
        self.fdm = ForeignDataManager()
        self.cur.execute("set work_mem to %s", (self.WORK_MEM, ))
class Daemon(RegexMatchingEventHandler):

    # The path for configuration directory and daemon configuration file
    CONFIG_DIR = os.path.join(os.environ['HOME'], '.PyBox')
    CONFIG_FILEPATH = os.path.join(CONFIG_DIR, 'daemon_config')

    # Default configuration for Daemon, loaded if fail to load the config file from CONFIG_DIR
    DEF_CONF = OrderedDict()
    DEF_CONF['local_dir_state_path'] = os.path.join(CONFIG_DIR,'local_dir_state')
    DEF_CONF['sharing_path'] = os.path.join(os.environ['HOME'], 'sharing_folder')
    DEF_CONF['cmd_address'] = 'localhost'
    DEF_CONF['cmd_port'] = 50001
    DEF_CONF['api_suffix'] = '/API/V1/'
    DEF_CONF['server_address'] = 'http://localhost:5000'
    DEF_CONF['user'] = '******'
    DEF_CONF['pass'] = '******'
    DEF_CONF['timeout_listener_sock'] = 0.5
    DEF_CONF['backlog_listener_sock'] = 1

    IGNORED_REGEX = ['.*\.[a-zA-Z]+?#',  # Libreoffice suite temporary files are ignored
                     '.*\.[a-zA-Z]+?~',  # gedit issue solved by ignoring this pattern:
                     # gedit first deletes the file, creates it, and moves it to dest_path *.txt~
                     ]

    # Calculate int size in the machine architecture
    INT_SIZE = struct.calcsize('!i')

    def __init__(self):
        RegexMatchingEventHandler.__init__(self, ignore_regexes=Daemon.IGNORED_REGEX, ignore_directories=True)

        # Just initialize variables; Daemon.start() does the rest
        self.daemon_state = 'down'  # TODO implement the daemon state (disconnected, connected, synchronizing, ready...)
        self.running = 0
        self.client_snapshot = {}  # EXAMPLE {'<filepath1>': ['<timestamp>', '<md5>'], '<filepath2>': ...}
        self.local_dir_state = {}  # EXAMPLE {'last_timestamp': '<timestamp>', 'global_md5': '<md5>'}
        self.listener_socket = None
        self.observer = None
        self.cfg = self.load_cfg(Daemon.CONFIG_FILEPATH)
        self.init_sharing_path()
        self.conn_mng = ConnectionManager(self.cfg)

    def load_cfg(self, config_path):
        """
        Load the config; if it can't be found or the config file is corrupted, restore it and load the default configuration
        :param config_path: Path of config
        :return: dictionary containing configuration
        """
        def build_default_cfg():
            """
            Restore default config file by writing on file
            :return: default configuration contained in the dictionary DEF_CONF
            """
            with open(Daemon.CONFIG_FILEPATH, 'wb') as fo:
                json.dump(Daemon.DEF_CONF, fo, skipkeys=True, ensure_ascii=True, indent=4)
            return Daemon.DEF_CONF

        # Check if the config directory exists, otherwise create it
        if not os.path.isdir(Daemon.CONFIG_DIR):
            try:
                os.makedirs(Daemon.CONFIG_DIR)
            except (OSError, IOError):
                self.stop(1, '\nImpossible to create "{}" directory! Permission denied!\n'.format(Daemon.CONFIG_DIR))

        if os.path.isfile(config_path):
            try:
                with open(config_path, 'r') as fo:
                    loaded_config = json.load(fo)
            except ValueError:
                print '\nImpossible to read "{0}"! Config file overwritten and default config loaded!\n'.format(config_path)
                return build_default_cfg()
            corrupted_config = False
            for k in Daemon.DEF_CONF:
                if k not in loaded_config:
                    corrupted_config = True
            # If everything went right, use the config in loaded_config
            if not corrupted_config:
                return loaded_config
            else:
                print '\nWarning "{0}" corrupted! Config file overwrited and loaded default config!\n'.format(config_path)
                return build_default_cfg()
        else:
            print '\nWarning "{0}" doesn\'t exist, Config file overwrited and loaded default config!\n'.format(config_path)
            return build_default_cfg()

    def init_sharing_path(self):
        """
        Check that the sharing folder exists, otherwise create it.
        If it can't be created, exit with an error message.
        """
        if not os.path.isdir(self.cfg['sharing_path']):
            try:
                os.makedirs(self.cfg['sharing_path'])
            except OSError:
                self.stop(1, '\nImpossible to create "{0}" directory! Check sharing_path value contained in the following file:\n"{1}"\n'
                          .format(self.cfg['sharing_path'], Daemon.CONFIG_FILEPATH))

    def build_client_snapshot(self):
        """
        Build a snapshot of the sharing folder with the following structure

        self.client_snapshot
        {
            "<file_path>":('<timestamp>', '<md5>')
        }
        """
        self.client_snapshot = {}
        for dirpath, dirs, files in os.walk(self.cfg['sharing_path']):
            for filename in files:
                filepath = os.path.join(dirpath, filename)
                unwanted_file = False
                for r in Daemon.IGNORED_REGEX:
                    if re.match(r, filepath) is not None:
                        unwanted_file = True
                        print 'Ignored Path:', filepath
                        break
                if not unwanted_file:
                    relative_path = self.relativize_path(filepath)
                    with open(filepath, 'rb') as f:
                        self.client_snapshot[relative_path] = ['', hashlib.md5(f.read()).hexdigest()]

    def _is_directory_modified(self):
        return self.calculate_md5_of_dir() != self.local_dir_state['global_md5']

    def search_md5(self, searched_md5):
        """
        Receive the md5 of a file as parameter and return the first known path with the same md5
        """
        for path, tupla in self.client_snapshot.iteritems():
            if searched_md5 in tupla[1]:
                return path
        return None

    def _sync_process(self, server_timestamp, server_dir_tree):
        # Runs the synchronization logic and returns a list of commands to launch
        # for server synchronization

        def _filter_tree_difference(server_dir_tree):
            # process the local dir_tree and the server dir_tree,
            # classify the diffs and return a dict representing that classification, e.g.
            # { 'new_on_server': [<filepath>, ...],  # files in server, but not in client
            #   'modified':      [<filepath>, ...],  # files in server and client, but different
            #   'new_on_client': [<filepath>, ...],  # files not in server, but in client
            # }
            client_files = set(self.client_snapshot.keys())
            server_files = set(server_dir_tree.keys())

            new_on_server = list(server_files.difference(client_files))
            new_on_client = list(client_files.difference(server_files))
            modified = []

            for filepath in server_files.intersection(client_files):
                # check files md5
                if server_dir_tree[filepath][1] != self.client_snapshot[filepath][1]:
                    modified.append(filepath)

            return {'new_on_server': new_on_server, 'modified': modified, 'new_on_client': new_on_client}

        def _make_copy(src, dst):
            abs_src = self.absolutize_path(src)
            abs_dst = self.absolutize_path(dst)
            self.observer.skip(abs_dst)
            try:
                copy2(abs_src, abs_dst)
            except IOError:
                return False

            self.client_snapshot[dst] = self.client_snapshot[src]
            return True

        def _make_move(src, dst):
            abs_src = self.absolutize_path(src)
            abs_dst = self.absolutize_path(dst)
            self.observer.skip(abs_dst)
            try:
                move(abs_src, abs_dst)
            except IOError:
                return False

            self.client_snapshot[dst] = self.client_snapshot[src]
            self.client_snapshot.pop(src)
            return True

        def _check_md5(dir_tree, md5):
            for k, v in dir_tree.items():
                if md5 == v[1]:
                    return k
            return None

        local_timestamp = self.local_dir_state['last_timestamp']
        tree_diff = _filter_tree_difference(server_dir_tree)

        sync_commands = []

        if self._is_directory_modified():
            if local_timestamp == server_timestamp:
                # simple case: the client has the command
                # it sends all folder modifications to server

                # files in server but not in client: remove them from server
                for filepath in tree_diff['new_on_server']:
                    sync_commands.append(('delete', filepath))
                    #self.conn_mng.dispatch_request('delete', {'filepath': filepath})

                # files modified in client: send modified files to server
                for filepath in tree_diff['modified']:
                    sync_commands.append(('modified', filepath))
                    #self.conn_mng.dispatch_request('modified', {'filepath': filepath})

                # files in client but not in server: upload them to server
                for filepath in tree_diff['new_on_client']:
                    sync_commands.append(('upload', filepath))
                    #self.conn_mng.dispatch_request('upload', {'filepath': filepath})

            else:  # local_timestamp < server_timestamp
                # the server has the command
                for filepath in tree_diff['new_on_server']:
                    timestamp, md5 = server_dir_tree[filepath]
                    existed_filepath = _check_md5(self.client_snapshot, md5)

                    if existed_filepath:
                        # it's a copy or a move
                        if _check_md5(server_dir_tree, md5):
                            _make_copy(existed_filepath, filepath)
                        else:
                            _make_move(existed_filepath, filepath)
                            tree_diff['new_on_client'].remove(existed_filepath)
                    else:
                        if timestamp > local_timestamp:
                            # the file on the server is more up to date
                            sync_commands.append(('download', filepath))
                            #self.conn_mng.dispatch_request('download', {'filepath': filepath})
                        else:
                            # the client has deleted the file, so delete it on server
                            sync_commands.append(('delete', filepath))
                            #self.conn_mng.dispatch_request('delete', {'filepath': filepath})

                for filepath in tree_diff['modified']:
                    timestamp, md5 = server_dir_tree[filepath]

                    if timestamp < local_timestamp:
                        # the client has modified the file, so update it on server
                        sync_commands.append(('modify', filepath))
                        #self.conn_mng.dispatch_request('modify', {'filepath': filepath})
                    else:
                        # it's the worst case:
                        # we have a conflict with the server;
                        # someone modified the file while the daemon was down, and someone else
                        # modified the same file on the server
                        conflicted_path = ''.join([filepath, '.conflicted'])
                        _make_copy(filepath, conflicted_path)
                        sync_commands.append(('upload', conflicted_path))
                        #self.conn_mng.dispatch_request('upload', {'filepath': conflicted_path})

                for filepath in tree_diff['new_on_client']:
                    sync_commands.append(('upload', filepath))
                    #self.conn_mng.dispatch_request('upload', {'filepath': filepath})

        else:  # directory not modified
            if local_timestamp == server_timestamp:
                # it's the best case. Client and server are already synchronized
                return []
            else:  # local_timestamp < server_timestamp
                # the server has the command
                for filepath in tree_diff['new_on_server']:
                    timestamp, md5 = server_dir_tree[filepath]
                    existed_filepath = _check_md5(self.client_snapshot, md5)

                    if existed_filepath:
                        # it's a copy or a move
                        if _check_md5(server_dir_tree, md5):
                            _make_copy(existed_filepath, filepath)
                        else:
                            _make_move(existed_filepath, filepath)
                            tree_diff['new_on_client'].remove(existed_filepath)
                    else:
                        # it's a new file
                        sync_commands.append(('download', filepath))
                        #self.conn_mng.dispatch_request('download', {'filepath': filepath})

                for filepath in tree_diff['modified']:
                    sync_commands.append(('download', filepath))
                    #self.conn_mng.dispatch_request('download', {'filepath': filepath})

                for filepath in tree_diff['new_on_client']:
                    # files that have been deleted on the server, so we have to delete them locally
                    abs_filepath = self.absolutize_path(filepath)
                    self.observer.skip(abs_filepath)
                    try:
                        os.remove(abs_filepath)
                    except OSError:
                        # the file may already be gone; ignore the error
                        pass
                    self.client_snapshot.pop(filepath)

        return sync_commands

    def sync_with_server(self):
        """
        Performs the synchronization with the server
        """
        response = self.conn_mng.dispatch_request('get_server_snapshot', '')
        if response is None:
            self.stop(1, '\nReceived bad snapshot. Server down?\n')

        server_timestamp = response['server_timestamp']
        files = response['files']

        sync_commands = self._sync_process(server_timestamp, files)
        self.update_local_dir_state(server_timestamp)

        # Initialize the variable where we put the timestamp of the last operation we did
        last_operation_timestamp = None

        # makes all synchronization commands
        for command, path in sync_commands:
            if command == 'delete':
                event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path})
                if event_timestamp:
                    print 'event_timestamp di "delete" INTO SYNC:', event_timestamp
                    last_operation_timestamp = event_timestamp['server_timestamp']
                    # If the path can't be found inside client_snapshot, client_snapshot is inconsistent!
                    if self.client_snapshot.pop(path, 'ERROR') == 'ERROR':
                        print 'Error during delete event INTO SYNC! Impossible to find "{}" inside client_snapshot'.format(path)
                else:
                    self.stop(1, 'Error during connection with the server. Server fail to "delete" this file: {}'.format(path))

            elif command == 'modified' or command == 'upload':
                event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path})
                if event_timestamp:
                    print 'event_timestamp di "{}" INTO SYNC: {}'.format(command, event_timestamp)
                    last_operation_timestamp = event_timestamp['server_timestamp']
                else:
                    self.stop(1, 'Error during connection with the server. Server fail to "{}" this file: {}'.format(command, path))

            else: # command == 'download'
                print 'skip download'
                self.observer.skip(self.absolutize_path(path))
                connection_result = self.conn_mng.dispatch_request(command, {'filepath': path})
                if connection_result:
                    print 'Downloaded file with path "{}" INTO SYNC'.format(path)
                    self.client_snapshot[path] = files[path]
                else:
                    self.stop(1, 'Error during connection with the server. Client fail to "download" this file: {}'.format(path))

        if last_operation_timestamp:
            self.update_local_dir_state(last_operation_timestamp)

    def relativize_path(self, abs_path):
        """
        This function relativizes a path watched by the daemon:
        for example: /home/user/watched/subfolder/ becomes subfolder/
        """
        if abs_path.startswith(self.cfg['sharing_path']):
            relative_path = abs_path[len(self.cfg['sharing_path']) + 1:]
            return relative_path
        else:
            raise ValueError('"{0}" is not inside the sharing path'.format(abs_path))

    def absolutize_path(self, rel_path):
        """
        This function absolutizes a path that was relativized before:
        for example: subfolder/ becomes /home/user/watched/subfolder/
        """
        return os.path.join(self.cfg['sharing_path'], rel_path)

    def create_observer(self):
        """
        Create an instance of the watchdog Observer thread class.
        """
        self.observer = SkipObserver()
        self.observer.schedule(self, path=self.cfg['sharing_path'], recursive=True)

    # TODO handle errors in the dictionary if the client_dispatcher misses required data!!
    # TODO update struct with a new, more performant data structure
    # TODO verify what happens if the server returns an error message
    ####################################

    def on_created(self, e):
        def build_data(cmd, rel_new_path, new_md5, founded_path=None):
            """
            Prepares the data from event handler to be delivered to connection_manager.
            """
            data = {'cmd': cmd}
            if cmd == 'copy':
                data['file'] = {'src': founded_path,
                                'dst': rel_new_path,
                                'md5': new_md5,
                                }
            else:
                data['file'] = {'filepath': rel_new_path,
                                'md5': new_md5,
                                }
            return data
        new_md5 = self.hash_file(e.src_path)
        rel_new_path = self.relativize_path(e.src_path)
        founded_path = self.search_md5(new_md5)

        # this check detects copy events
        if founded_path:
            print 'start copy'
            data = build_data('copy', rel_new_path, new_md5, founded_path)

        # this elif checks whether this 'created' event is actually a 'modified' event
        elif rel_new_path in self.client_snapshot:
            print 'start modified FROM CREATE!!!!!'
            data = build_data('modify', rel_new_path, new_md5)

        else: # Finally we find a real create event!
            print 'start create'
            data = build_data('upload', rel_new_path, new_md5)

        # Send data to the connection manager dispatcher and check the return value. If all goes right, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request(data['cmd'], data['file'])
        print 'event_timestamp di "{}" = {}'.format(data['cmd'], event_timestamp)
        if event_timestamp:
            self.client_snapshot[rel_new_path] = [event_timestamp, new_md5]
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Impossible to connect with the server. Failed during "{0}" operation on "{1}" file'
                      .format(data['cmd'], e.src_path))

    def on_moved(self, e):

        print 'start move'
        rel_src_path = self.relativize_path(e.src_path)
        rel_dest_path = self.relativize_path(e.dest_path)
        # If rel_src_path can't be found inside client_snapshot, client_snapshot is inconsistent!
        if self.client_snapshot.get(rel_src_path, 'ERROR') == 'ERROR':
            self.stop(1, 'Error during move event! Impossible to find "{}" inside client_snapshot'.format(rel_dest_path))
        md5 = self.client_snapshot[rel_src_path][1]
        data = {'src': rel_src_path,
                 'dst': rel_dest_path,
                 'md5': md5,
                 }
        # Send data to the connection manager dispatcher and check the return value. If all goes right, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('move', data)
        print 'event_timestamp di "move" =', event_timestamp
        if event_timestamp:
            self.client_snapshot[rel_dest_path] = [event_timestamp, md5]
            # rel_src_path certainly exists inside client_snapshot (checked above), so the pop result isn't checked
            self.client_snapshot.pop(rel_src_path)
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Impossible to connect with the server. Failed during "move" operation on "{}" file'.format(e.src_path ))

    def on_modified(self, e):

        print 'start modified'
        new_md5 = self.hash_file(e.src_path)
        rel_path = self.relativize_path(e.src_path)

        data = {'filepath': rel_path,
                'md5': new_md5
                }

        # Send data to the connection manager dispatcher and check the return value. If all goes right, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('modify', data)
        if event_timestamp:
            print 'event_timestamp di "modified" =', event_timestamp
            self.client_snapshot[rel_path] = [event_timestamp, new_md5]
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Impossible to connect with the server. Failed during "delete" operation on "{}" file'.format(e.src_path))

    def on_deleted(self, e):

        print 'start delete'
        rel_deleted_path = self.relativize_path(e.src_path)

        # Send data to the connection manager dispatcher and check the return value.
        # If all goes right, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('delete', {'filepath': rel_deleted_path})
        if event_timestamp:
            print 'event_timestamp of "delete" =', event_timestamp
            # If rel_deleted_path is not inside client_snapshot, there is an inconsistency problem in client_snapshot!
            if self.client_snapshot.pop(rel_deleted_path, 'ERROR') == 'ERROR':
                print 'Error during delete event! Impossible to find "{}" inside client_snapshot'.format(rel_deleted_path)
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Impossible to connect with the server. Failed during "delete" operation on "{}" file'.format(e.src_path))

    def start(self):
        """
        Starts the communication with the command_manager.
        """
        self.build_client_snapshot()
        self.load_local_dir_state()

        # Operations necessary to start the daemon
        self.create_observer()
        self.observer.start()
        self.sync_with_server()

        self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listener_socket.bind((self.cfg['cmd_address'], self.cfg['cmd_port']))
        self.listener_socket.listen(self.cfg['backlog_listener_sock'])
        r_list = [self.listener_socket]
        self.daemon_state = 'started'
        self.running = 1
        polling_counter = 0
        try:
            while self.running:
                r_ready, w_ready, e_ready = select.select(r_list, [], [], self.cfg['timeout_listener_sock'])

                for s in r_ready:

                    if s == self.listener_socket:
                        # handle the server socket
                        client_socket, client_address = self.listener_socket.accept()
                        r_list.append(client_socket)
                    else:
                        # handle all other sockets
                        length = s.recv(Daemon.INT_SIZE)
                        if length:
                            # [0] and the int() cast are needed because struct.unpack
                            # returns a tuple like (23234234,)
                            length = int(struct.unpack('!i', length)[0])
                            message = json.loads(s.recv(length))
                            for cmd, data in message.items():
                                if cmd == 'shutdown':
                                    raise KeyboardInterrupt
                                self.conn_mng.dispatch_request(cmd, data)
                        else:
                            s.close()
                            r_list.remove(s)

                # synchronization polling
                # polls every 3 seconds: with the 0.5 s select timeout it waits six cycles (0.5 * 6 = 3 seconds)
                # maybe optimizable but functional for now
                polling_counter += 1
                if polling_counter == 6:
                    self.sync_with_server()
                    polling_counter = 0

        except KeyboardInterrupt:
            self.stop(0)
        self.observer.stop()
        self.observer.join()
        self.listener_socket.close()
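
    # A minimal sketch (an assumption, mirroring the listener loop above) of the
    # framing a command_manager client would use to talk to this daemon: a 4-byte
    # big-endian length prefix followed by a JSON-encoded command, e.g.
    #   payload = json.dumps({'shutdown': {}})
    #   sock.sendall(struct.pack('!i', len(payload)) + payload)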

    def stop(self, exit_status, exit_message=None):
        """
        Stop the Daemon components (observer and communication with command_manager).
        """
        if self.daemon_state == 'started':
            self.running = 0
            self.daemon_state = 'down'
        self.save_local_dir_state()
        if exit_message:
            print exit_message
        exit(exit_status)

    def update_local_dir_state(self, last_timestamp):
        """
        Update the local_dir_state with last_timestamp operation and save it on disk
        """
        if isinstance(last_timestamp, int):
            self.local_dir_state['last_timestamp'] = last_timestamp
            self.local_dir_state['global_md5'] = self.calculate_md5_of_dir()
            self.save_local_dir_state()
        else:
            self.stop(1, 'Non-int value assigned to local_dir_state[\'last_timestamp\']!\nIncorrect value: {}'.format(last_timestamp))

    def save_local_dir_state(self):
        """
        Save local_dir_state on disk
        """
        json.dump(self.local_dir_state, open(self.cfg['local_dir_state_path'], "wb"), indent=4)
        print "local_dir_state saved"

    def load_local_dir_state(self):
        """
        Load the local dir state into the self.local_dir_state variable;
        if the file doesn't exist it will be created with a zero timestamp
        """
        def _rebuild_local_dir_state():
            self.local_dir_state = {'last_timestamp': 0, 'global_md5': self.calculate_md5_of_dir()}
            json.dump(self.local_dir_state, open(self.cfg['local_dir_state_path'], "wb"), indent=4)

        if os.path.isfile(self.cfg['local_dir_state_path']):
            self.local_dir_state = json.load(open(self.cfg['local_dir_state_path'], "rb"))
            if 'last_timestamp' in self.local_dir_state and 'global_md5' in self.local_dir_state \
                    and isinstance(self.local_dir_state['last_timestamp'], int):
                print "questo è last_timestamp:", self.local_dir_state['last_timestamp']
                #self.local_dir_state['last_timestamp'] = int(self.local_dir_state['last_timestamp'])
                print "Loaded local_dir_state"
            else:
                print "local_dir_state corrupted. Reinitialized new local_dir_state"
                _rebuild_local_dir_state()
        else:
            print "local_dir_state not found. Initialize new local_dir_state"
            _rebuild_local_dir_state()
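        # Example of the on-disk JSON this method produces on first run
        # (the global_md5 value here is illustrative):
        #   {"last_timestamp": 0, "global_md5": "d41d8cd98f00b204e9800998ecf8427e"}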


    def calculate_md5_of_dir(self, verbose=0):
        """
        Calculate the md5 of the entire directory by combining,
        for each file, the md5 stored in client_snapshot with the md5 of its full filepath string.
        When the filepath isn't in client_snapshot the md5 is calculated on the fly
        :return: the md5 hash of the directory
        """
        directory = self.cfg['sharing_path']
        if verbose:
            start = time.time()
        md5Hash = hashlib.md5()
        if not os.path.exists(directory):
            self.stop(1, 'Error during calculate md5! Impossible to find "{}" in user folder'.format(directory))

        for root, dirs, files in os.walk(directory, followlinks=False):
            for names in files:
                filepath = os.path.join(root, names)
                rel_path = self.relativize_path(filepath)
                if rel_path in self.client_snapshot:
                    md5Hash.update(self.client_snapshot[rel_path][1])
                    md5Hash.update(hashlib.md5(filepath).hexdigest())
                else:
                    hashed_file = self.hash_file(filepath)
                    if hashed_file:
                        md5Hash.update(hashed_file)
                        md5Hash.update(hashlib.md5(filepath).hexdigest())
                    else:
                        print "can't hash file: ", filepath

        if verbose:
            stop = time.time()
            print stop - start
        return md5Hash.hexdigest()
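
    # Note on the scheme above: for every file the digest mixes the per-file md5
    # with the md5 of its absolute filepath string, so a rename alone changes the
    # resulting global md5 even though no file content changed.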

    def hash_file(self, file_path, chunk_size=1024):
        """
        :param file_path: an absolute file path
        :return: the md5 hash of the received file
        """
        md5Hash = hashlib.md5()
        try:
            f1 = open(file_path, 'rb')
            while 1:
                # Read the file in small chunks
                buf = f1.read(chunk_size)
                if not buf:
                    break
                md5Hash.update(hashlib.md5(buf).hexdigest())
            f1.close()
            return md5Hash.hexdigest()
        except (OSError, IOError) as e:
            print e
            return None
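
# A minimal standalone sketch of the chunked-hashing scheme used by hash_file
# above (hash_file_standalone is a hypothetical helper, not part of the daemon).
# Note that, like hash_file, it feeds the hexdigest of each chunk into the outer
# md5, so its output matches the md5 values stored in client_snapshot.
import hashlib

def hash_file_standalone(file_path, chunk_size=1024):
    outer = hashlib.md5()
    with open(file_path, 'rb') as f:
        while True:
            buf = f.read(chunk_size)
            if not buf:
                break
            # hash the chunk, then feed its hex representation to the outer hash
            outer.update(hashlib.md5(buf).hexdigest())
    return outer.hexdigest()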
Example #50
class AWSIOTDeviceAdapter(DeviceAdapter):
    """A device adapter allowing connections to devices over AWS IoT

    Args:
        port (string): An optional port string specifying a topic prefix
            to use if we are trying to connect to a gateway, otherwise,
            we assume that we're connecting directly to a device that
            is attached to AWS IoT.
    """
    def __init__(self, port):
        super(AWSIOTDeviceAdapter, self).__init__()

        self.set_config('default_timeout', 5.0)

        reg = ComponentRegistry()
        endpoint = reg.get_config('awsiot-endpoint')
        rootcert = reg.get_config('awsiot-rootcert')
        iamuser = reg.get_config('awsiot-iamkey')
        iamsecret = reg.get_config('awsiot-iamtoken')
        iamsession = reg.get_config('awsiot-session', default=None)

        args = {}
        args['endpoint'] = endpoint
        args['root_certificate'] = rootcert
        args['use_websockets'] = True
        args['iam_key'] = iamuser
        args['iam_secret'] = iamsecret
        args['iam_session'] = iamsession

        self._logger = logging.getLogger(__name__)

        # Port should be a topic prefix that allows us to connect
        # only to subset of IOTile devices managed by a gateway
        # rather than to directly accessible iotile devices.
        if port is None:
            port = ""

        if len(port) > 0 and port[-1] != '/':
            port = port + '/'

        self.client = OrderedAWSIOTClient(args)
        self.name = str(uuid.uuid4())
        self.client.connect(self.name)
        self.prefix = port

        self.conns = ConnectionManager(self.id)
        self.conns.start()

        self.client.subscribe(self.prefix + 'devices/+/data/advertisement',
                              self._on_advertisement,
                              ordered=False)

        self._deferred = queue.Queue()

        self.set_config('minimum_scan_time', 5.0)
        self.set_config('probe_supported', True)
        self.set_config('probe_required', True)
        self.mtu = self.get_config(
            'mtu', 60 * 1024)  # Split script payloads larger than this

    def connect_async(self, connection_id, connection_string, callback):
        """Connect to a device by its connection_string

        This function looks for the device on AWS IOT using the preconfigured
        topic prefix and looking for:
        <prefix>/devices/connection_string

        It then attempts to lock that device for exclusive access and
        returns a callback if successful.

        Args:
            connection_id (int): A unique integer set by the caller for referring to this connection
                once created
            connection_string (string): A device id of the form d--XXXX-YYYY-ZZZZ-WWWW
            callback (callable): A callback function called when the connection has succeeded or
                failed
        """

        topics = MQTTTopicValidator(self.prefix +
                                    'devices/{}'.format(connection_string))
        key = self._generate_key()
        name = self.name

        conn_message = {
            'type': 'command',
            'operation': 'connect',
            'key': key,
            'client': name
        }
        context = {'key': key, 'slug': connection_string, 'topics': topics}

        self.conns.begin_connection(connection_id, connection_string,
                                    callback, context,
                                    self.get_config('default_timeout'))

        self._bind_topics(topics)

        try:
            self.client.publish(topics.connect, conn_message)
        except IOTileException:
            self._unbind_topics(topics)
            self.conns.finish_connection(connection_id, False,
                                         'Failed to send connection message')

    def disconnect_async(self, conn_id, callback):
        """Asynchronously disconnect from a device that has previously been connected

        Args:
            conn_id (int): a unique identifier for this connection on the DeviceManager
                that owns this adapter.
            callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
            when the disconnection finishes.  Disconnection can only either succeed or timeout.
        """

        try:
            context = self.conns.get_context(conn_id)
        except ArgumentError:
            callback(conn_id, self.id, False,
                     "Could not find connection information")
            return

        self.conns.begin_disconnection(conn_id, callback,
                                       self.get_config('default_timeout'))

        topics = context['topics']
        disconn_message = {
            'key': context['key'],
            'client': self.name,
            'type': 'command',
            'operation': 'disconnect'
        }

        self.client.publish(topics.action, disconn_message)

    def send_script_async(self, conn_id, data, progress_callback, callback):
        """Asynchronously send a a script to this IOTile device

        Args:
            conn_id (int): A unique identifer that will refer to this connection
            data (string): the script to send to the device
            progress_callback (callable): A function to be called with status on our progress, called as:
                progress_callback(done_count, total_count)
            callback (callable): A callback for when we have finished sending the script. The callback will be called as
                callback(connection_id, adapter_id, success, failure_reason)
                'connection_id': the connection id
                'adapter_id': this adapter's id
                'success': a bool indicating whether we received a response to our attempted RPC
                'failure_reason': a string with the reason for the failure if success == False
        """

        try:
            context = self.conns.get_context(conn_id)
        except ArgumentError:
            callback(conn_id, self.id, False,
                     "Could not find connection information")
            return

        topics = context['topics']
        context['progress_callback'] = progress_callback

        self.conns.begin_operation(conn_id, 'script', callback, 60.0)

        chunks = 1
        if len(data) > self.mtu:
            chunks = len(data) // self.mtu
            if len(data) % self.mtu != 0:
                chunks += 1
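        # e.g. a 150 KiB script with the default 60 KiB mtu gives
        # 150 // 60 == 2 full fragments plus a remainder, so chunks == 3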

        # Send the script out possibly in multiple chunks if it's larger than our maximum transmit unit
        for i in xrange(0, chunks):
            start = i * self.mtu
            chunk = data[start:start + self.mtu]
            encoded = base64.standard_b64encode(chunk)

            script_message = {
                'key': context['key'],
                'client': self.name,
                'type': 'command',
                'operation': 'send_script',
                'script': encoded,
                'fragment_count': chunks,
                'fragment_index': i
            }

            self.client.publish(topics.action, script_message)

    def send_rpc_async(self, conn_id, address, rpc_id, payload, timeout,
                       callback):
        """Asynchronously send an RPC to this IOTile device

        Args:
            conn_id (int): A unique identifier that will refer to this connection
            address (int): the address of the tile that we wish to send the RPC to
            rpc_id (int): the 16-bit id of the RPC we want to call
            payload (bytearray): the payload of the command
            timeout (float): the number of seconds to wait for the RPC to execute
            callback (callable): A callback for when we have finished the RPC.  The callback will be called as:
                callback(connection_id, adapter_id, success, failure_reason, status, payload)
                'connection_id': the connection id
                'adapter_id': this adapter's id
                'success': a bool indicating whether we received a response to our attempted RPC
                'failure_reason': a string with the reason for the failure if success == False
                'status': the one byte status code returned for the RPC if success == True else None
                'payload': a bytearray with the payload returned by RPC if success == True else None
        """

        try:
            context = self.conns.get_context(conn_id)
        except ArgumentError:
            callback(conn_id, self.id,
                     False, "Could not find connection information", 0xFF,
                     bytearray())
            return

        self.conns.begin_operation(conn_id, 'rpc', callback, timeout)

        topics = context['topics']

        encoded_payload = binascii.hexlify(payload)

        rpc_message = {
            'key': context['key'],
            'client': self.name,
            'type': 'command',
            'operation': 'rpc',
            'address': address,
            'rpc_id': rpc_id,
            'payload': encoded_payload,
            'timeout': timeout
        }

        self.client.publish(topics.action, rpc_message)

    def _open_rpc_interface(self, conn_id, callback):
        """Enable RPC interface for this IOTile device

        Args:
            conn_id (int): the unique identifier for the connection
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """

        self._open_interface(conn_id, 'rpc', callback)

    def _open_streaming_interface(self, conn_id, callback):
        """Enable streaming interface for this IOTile device

        Args:
            conn_id (int): the unique identifier for the connection
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """

        self._open_interface(conn_id, 'streaming', callback)

    def _open_tracing_interface(self, conn_id, callback):
        """Enable tracing interface for this IOTile device

        Args:
            conn_id (int): the unique identifier for the connection
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """

        self._open_interface(conn_id, 'tracing', callback)

    def _open_script_interface(self, conn_id, callback):
        """Enable script interface for this IOTile device

        Args:
            conn_id (int): the unique identifier for the connection
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """

        self._open_interface(conn_id, 'script', callback)

    def _open_interface(self, conn_id, iface, callback):
        """Open an interface on this device

        Args:
            conn_id (int): the unique identifier for the connection
            iface (string): the interface name to open
            callback (callback): Callback to be called when this command finishes
                callback(conn_id, adapter_id, success, failure_reason)
        """

        try:
            context = self.conns.get_context(conn_id)
        except ArgumentError:
            callback(conn_id, self.id, False,
                     "Could not find connection information")
            return

        self.conns.begin_operation(conn_id, 'open_interface', callback,
                                   self.get_config('default_timeout'))

        topics = context['topics']

        open_iface_message = {
            'key': context['key'],
            'type': 'command',
            'operation': 'open_interface',
            'client': self.name,
            'interface': iface
        }
        self.client.publish(topics.action, open_iface_message)

    def stop_sync(self):
        """Synchronously stop this adapter
        """

        conn_ids = self.conns.get_connections()

        # If we have any open connections, try to close them here before shutting down
        for conn in conn_ids:
            try:
                self.disconnect_sync(conn)
            except HardwareError:
                pass

        self.client.disconnect()
        self.conns.stop()

    def probe_async(self, callback):
        """Probe for visible devices connected to this DeviceAdapter.

        Args:
            callback (callable): A callback for when the probe operation has completed.
                callback should have signature callback(adapter_id, success, failure_reason) where:
                    success: bool
                    failure_reason: None if success is True, otherwise a reason for why we could not probe
        """

        topics = MQTTTopicValidator(self.prefix)
        self.client.publish(topics.probe, {
            'type': 'command',
            'operation': 'probe',
            'client': self.name
        })
        callback(self.id, True, None)

    def periodic_callback(self):
        """Periodically help maintain adapter internal state
        """

        while True:
            try:
                action = self._deferred.get(False)
                action()
            except queue.Empty:
                break
            except Exception:
                self._logger.exception('Exception in periodic callback')

    def _bind_topics(self, topics):
        """Subscribe to all the topics we need to communication with this device

        Args:
            topics (MQTTTopicValidator): The topic validator for this device that
                we are connecting to.
        """

        # FIXME: Allow for these subscriptions to fail and clean up the previous ones
        # so that this function is atomic

        self.client.subscribe(topics.status, self._on_status_message)
        self.client.subscribe(topics.tracing, self._on_trace)
        self.client.subscribe(topics.streaming, self._on_report)
        self.client.subscribe(topics.response, self._on_response_message)

    def _unbind_topics(self, topics):
        """Unsubscribe to all of the topics we needed for communication with device

        Args:
            topics (MQTTTopicValidator): The topic validator for this device that
                we have connected to.
        """

        self.client.unsubscribe(topics.status)
        self.client.unsubscribe(topics.tracing)
        self.client.unsubscribe(topics.streaming)
        self.client.unsubscribe(topics.response)

    def _generate_key(self):
        """Generate a random 32 byte key and encode it in hex

        Returns:
            string: Cryptographically random 64 character string
        """

        key = os.urandom(32)
        return binascii.hexlify(key)

    def _find_connection(self, topic):
        """Attempt to find a connection id corresponding with a topic

        The device is found by assuming the topic ends in <slug>/[control|data]/channel

        Args:
            topic (string): The topic we received a message on

        Returns:
            string: The device slug (connection key) associated with this topic
        """

        parts = topic.split('/')
        if len(parts) < 3:
            return None

        slug = parts[-3]
        return slug

    def _on_advertisement(self, sequence, topic, message):
        try:
            # FIXME: We need a global topic validator to validate these messages
            # message = self.topics.validate_message(['advertisement'], message_type, message)

            del message['operation']
            del message['type']
            self._trigger_callback(
                'on_scan', self.id, message,
                60.)  # FIXME: Get the timeout from somewhere
        except IOTileException:
            # drop advertisements that fail validation
            pass

    def _on_report(self, sequence, topic, message):
        """Process a report received from a device.

        Args:
            sequence (int): The sequence number of the packet received
            topic (string): The topic this message was received on
            message (dict): The message itself
        """

        try:
            conn_key = self._find_connection(topic)
            conn_id = self.conns.get_connection_id(conn_key)
        except ArgumentError:
            self._logger.warn(
                "Dropping report message that does not correspond with a known connection, topic=%s",
                topic)
            return

        try:
            rep_msg = messages.ReportNotification.verify(message)

            serialized_report = {}
            serialized_report['report_format'] = rep_msg['report_format']
            serialized_report['encoded_report'] = rep_msg['report']
            serialized_report['received_time'] = datetime.datetime.strptime(
                rep_msg['received_time'].decode(), "%Y%m%dT%H:%M:%S.%fZ")

            report = IOTileReportParser.DeserializeReport(serialized_report)
            self._trigger_callback('on_report', conn_id, report)
        except Exception:
            self._logger.exception("Error processing report conn_id=%d",
                                   conn_id)

    def _on_trace(self, sequence, topic, message):
        """Process a trace received from a device.

        Args:
            sequence (int): The sequence number of the packet received
            topic (string): The topic this message was received on
            message (dict): The message itself
        """

        try:
            conn_key = self._find_connection(topic)
            conn_id = self.conns.get_connection_id(conn_key)
        except ArgumentError:
            self._logger.warn(
                "Dropping trace message that does not correspond with a known connection, topic=%s",
                topic)
            return

        try:
            tracing = messages.TracingNotification.verify(message)
            self._trigger_callback('on_trace', conn_id, tracing['trace'])
        except Exception:
            self._logger.exception("Error processing trace conn_id=%d",
                                   conn_id)

    def _on_status_message(self, sequence, topic, message):
        """Process a status message received

        Args:
            sequence (int): The sequence number of the packet received
            topic (string): The topic this message was received on
            message (dict): The message itself
        """

        self._logger.debug("Received message on (topic=%s): %s" %
                           (topic, message))

        try:
            conn_key = self._find_connection(topic)
        except ArgumentError:
            self._logger.warn(
                "Dropping message that does not correspond with a known connection, message=%s",
                message)
            return

        if messages.ConnectionResponse.matches(message):
            if self.name != message['client']:
                self._logger.debug(
                    "Connection response received for a different client, client=%s, name=%s",
                    message['client'], self.name)
                return

            self.conns.finish_connection(conn_key, message['success'],
                                         message.get('failure_reason', None))
        else:
            self._logger.warn(
                "Dropping message that did not correspond with a known schema, message=%s",
                message)

    def _on_response_message(self, sequence, topic, message):
        """Process a response message received

        Args:
            sequence (int): The sequence number of the packet received
            topic (string): The topic this message was received on
            message (dict): The message itself
        """

        try:
            conn_key = self._find_connection(topic)
            context = self.conns.get_context(conn_key)
        except ArgumentError:
            self._logger.warn(
                "Dropping message that does not correspond with a known connection, message=%s",
                message)
            return

        if 'client' in message and message['client'] != self.name:
            self._logger.debug(
                "Dropping message that is for another client %s, we are %s",
                message['client'], self.name)
            return

        if messages.DisconnectionResponse.matches(message):
            self.conns.finish_disconnection(
                conn_key, message['success'],
                message.get('failure_reason', None))
        elif messages.OpenInterfaceResponse.matches(message):
            self.conns.finish_operation(conn_key, message['success'],
                                        message.get('failure_reason', None))
        elif messages.RPCResponse.matches(message):
            rpc_message = messages.RPCResponse.verify(message)
            self.conns.finish_operation(
                conn_key, rpc_message['success'],
                rpc_message.get('failure_reason', None),
                rpc_message.get('status', None),
                rpc_message.get('payload', None))
        elif messages.ProgressNotification.matches(message):
            progress_callback = context.get('progress_callback', None)
            if progress_callback is not None:
                progress_callback(message['done_count'],
                                  message['total_count'])
        elif messages.ScriptResponse.matches(message):
            if 'progress_callback' in context:
                del context['progress_callback']

            self.conns.finish_operation(conn_key, message['success'],
                                        message.get('failure_reason', None))
        elif messages.DisconnectionNotification.matches(message):
            try:
                conn_key = self._find_connection(topic)
                conn_id = self.conns.get_connection_id(conn_key)
            except ArgumentError:
                self._logger.warn(
                    "Dropping disconnect notification that does not correspond with a known connection, topic=%s",
                    topic)
                return

            self.conns.unexpected_disconnect(conn_key)
            self._trigger_callback('on_disconnect', self.id, conn_id)
        else:
            self._logger.warn("Invalid response message received, message=%s",
                              message)
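
# A minimal usage sketch of the adapter above. Everything here is illustrative:
# it assumes the ComponentRegistry already holds valid 'awsiot-*' settings,
# 'gateway-prefix' and the device slug are placeholders, and the callback
# signature is assumed to match the one documented for disconnect_async.
def _on_connect_done(conn_id, adapter_id, success, failure_reason):
    print conn_id, adapter_id, success, failure_reason

adapter = AWSIOTDeviceAdapter('gateway-prefix')
adapter.connect_async(1, 'd--0000-0000-0000-0001', _on_connect_done)
# ... later, shut the adapter down cleanly
adapter.stop_sync()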
Example #51
    def __init__(self, connection_manager=ConnectionManager()):
        self.connection_manager = connection_manager
        self.exchange_initialize()
        self.channel_initialize()
class Daemon(RegexMatchingEventHandler):
    # The path for configuration directory and daemon configuration file
    CONFIG_DIR = os.path.join(os.environ['HOME'], '.PyBox')
    CONFIG_FILEPATH = os.path.join(CONFIG_DIR, 'daemon_config')

    # Default configuration for the Daemon, loaded if loading the config file from CONFIG_DIR fails
    DEF_CONF = OrderedDict()
    DEF_CONF['local_dir_state_path'] = os.path.join(CONFIG_DIR, 'local_dir_state')
    DEF_CONF['sharing_path'] = os.path.join(os.environ['HOME'], 'sharing_folder')
    DEF_CONF['cmd_address'] = 'localhost'
    DEF_CONF['cmd_port'] = 50001
    DEF_CONF['api_suffix'] = '/API/V1/'
    DEF_CONF['server_address'] = 'http://localhost:5000'

    IGNORED_REGEX = ['.*\.[a-zA-Z]+?#',  # LibreOffice suite temporary files are ignored
                     '.*\.[a-zA-Z]+?~',  # gedit issue solved by ignoring this pattern:
                     # gedit first deletes the file, creates it, and moves it to dest_path *.txt~
    ]

    # Calculate int size in the machine architecture
    INT_SIZE = struct.calcsize('!i')

    # Allowed operations before the user is activated
    ALLOWED_OPERATION = {'register', 'activate'}

    def __init__(self, cfg_path=None, sharing_path=None):
        RegexMatchingEventHandler.__init__(self, ignore_regexes=Daemon.IGNORED_REGEX, ignore_directories=True)

        # Just initialize variables here; Daemon.start() does everything else
        self.daemon_state = 'down'  # TODO implement the daemon state (disconnected, connected, synchronizing, ready...)
        self.running = 0
        self.client_snapshot = {}  # EXAMPLE {'<filepath1>': ['<timestamp>', '<md5>'], '<filepath2>': ...}
        self.local_dir_state = {}  # EXAMPLE {'last_timestamp': '<timestamp>', 'global_md5': '<md5>'}
        self.listener_socket = None
        self.observer = None
        self.cfg = self._load_cfg(cfg_path, sharing_path)
        self._init_sharing_path(sharing_path)

        self.conn_mng = ConnectionManager(self.cfg)

    def _build_directory(self, path):
        """
        Create the given directory if it doesn't already exist
        :param path: the path of the directory to create
        :return: boolean indicating whether the directory now exists
        """
        if not os.path.isdir(path):
            try:
                os.makedirs(path)
            except OSError:
                print '\nImpossible to create directory at the following path:\n{}\n'.format(path)
                return False
            else:
                print 'Created folder:\n', path
        return True

    def _create_cfg(self, cfg_path, sharing_path=None):
        """
        Create the configuration file of client_daemon.
        If a custom path for the cfg (cfg_path) or the observed directory (sharing_path) is given, the config file
        will be updated with that configuration.
        If no cfg_path is given, we save to the default path stored in Daemon.CONFIG_FILEPATH.
        If no sharing_path is given, we save to the default path stored in Daemon.DEF_CONF['sharing_path'].
        :param cfg_path: Path of the config file
        :param sharing_path: Path of the observed directory
        """

        building_cfg = Daemon.DEF_CONF.copy()  # copy so the class-level defaults are not mutated
        if cfg_path and cfg_path != Daemon.CONFIG_FILEPATH:
            Daemon.CONFIG_FILEPATH = cfg_path
            Daemon.CONFIG_DIR = os.path.dirname(cfg_path)
            building_cfg['local_dir_state_path'] = os.path.join(Daemon.CONFIG_DIR, 'local_dir_state')
        if sharing_path:
            building_cfg['sharing_path'] = sharing_path
        if self._build_directory(Daemon.CONFIG_DIR):
            with open(Daemon.CONFIG_FILEPATH, 'w') as daemon_config:
                json.dump(building_cfg, daemon_config, skipkeys=True, ensure_ascii=True, indent=4)
            return building_cfg
        else:
            self.stop(1, 'Impossible to create cfg file into {}'.format(Daemon.CONFIG_DIR))

    def update_cfg(self):
        """
        Update the cfg file with the new state in self.cfg
        """
        with open(Daemon.CONFIG_FILEPATH, 'w') as daemon_config:
            json.dump(self.cfg, daemon_config, skipkeys=True, ensure_ascii=True, indent=4)

    def _load_cfg(self, cfg_path, sharing_path):
        """
        Load the config; if it is impossible to find it or the config file is corrupted, restore it and load the default configuration
        :param cfg_path: Path of the config file
        :param sharing_path: Path of the observed directory
        :return: dictionary containing the configuration
        """
        if not cfg_path:
            cfg_path = Daemon.CONFIG_FILEPATH

        if os.path.isfile(cfg_path):
            try:
                with open(cfg_path, 'r') as fo:
                    loaded_config = OrderedDict()
                    for k, v in json.load(fo).iteritems():
                        loaded_config[k] = v
            except ValueError:
                print '\nImpossible to read "{0}"!' \
                      '\nConfig file overwritten and loaded with the default configuration!\n'.format(cfg_path)
            else:
                # Check that all the keys in DEF_CONF are present in loaded_config
                if all(k in loaded_config for k in Daemon.DEF_CONF):
                    # If everything went right we can update the CONFIG constants and return loaded_config
                    Daemon.CONFIG_FILEPATH = cfg_path
                    Daemon.CONFIG_DIR = os.path.dirname(cfg_path)
                    return loaded_config
                print '\nWarning: "{0}" corrupted!\nConfig file overwritten and loaded with the default configuration!\n'\
                    .format(cfg_path)
        else:
            print '\nWarning: "{0}" doesn\'t exist!' \
                  '\nNew config file created and loaded with the default configuration!\n'.format(cfg_path)
        return self._create_cfg(cfg_path, sharing_path)

    def _init_sharing_path(self, sharing_path):
        """
        Check that the sharing folder exists, otherwise create it.
        If no custom sharing_path is given we use the default stored in self.cfg['sharing_path'].
        If it is impossible to create the directory, exit with an error message.
        """

        if not sharing_path:
            sharing_path = Daemon.DEF_CONF['sharing_path']
        if self._build_directory(sharing_path):
            self.cfg['sharing_path'] = sharing_path
            self.update_cfg()
        else:
            self.stop(1, '\nImpossible to create sharing folder in path:\n{}\n'
                         'Check sharing_path value contained in cfg file:\n{}\n'
                      .format(self.cfg['sharing_path'], Daemon.CONFIG_FILEPATH))

    def build_client_snapshot(self):
        """
        Build a snapshot of the sharing folder with the following structure:

        self.client_snapshot
        {
            "<file_path>": ['<timestamp>', '<md5>']
        }
        """
        self.client_snapshot = {}
        for dirpath, dirs, files in os.walk(self.cfg['sharing_path']):
            for filename in files:
                filepath = os.path.join(dirpath, filename)
                unwanted_file = False
                for r in Daemon.IGNORED_REGEX:
                    if re.match(r, filepath) is not None:
                        unwanted_file = True
                        print 'Ignored Path:', filepath
                        break
                if not unwanted_file:
                    relative_path = self.relativize_path(filepath)
                    self.client_snapshot[relative_path] = ['', self.hash_file(filepath)]

    def _is_directory_modified(self):
        """
        Check whether the shared folder has been modified.
        It recalculates the md5 from client_snapshot and compares it with the global md5 stored in local_dir_state
        :return: True or False
        """

        return self.md5_of_client_snapshot() != self.local_dir_state['global_md5']

    def search_md5(self, searched_md5):
        """
        Receive the md5 of a file as parameter and return the first known path with the same md5
        """
        for path, value in self.client_snapshot.iteritems():
            if searched_md5 == value[1]:
                return path
        return None

    def _make_copy_on_client(self, src, dst, server_timestamp):
        """
        Copy the file from src to dst; if dst already exists it will be overwritten
        :param src: the relative path of the source file to copy
        :param dst: the relative path of the destination file to copy
        :return: True or False
        """

        abs_src = self.absolutize_path(src)
        if not os.path.isfile(abs_src):
            return False

        abs_dst = self.absolutize_path(dst)
        dst_dir = os.path.dirname(abs_dst)

        if not os.path.isdir(dst_dir):
            os.makedirs(dst_dir)

        self.observer.skip(abs_dst)
        try:
            copy2(abs_src, abs_dst)
        except IOError:
            return False

        self.client_snapshot[dst] = self.client_snapshot[src]
        self.update_local_dir_state(server_timestamp)
        return True

    def _make_move_on_client(self, src, dst, server_timestamp):
        """
        Move the file from src to dst; if dst already exists it will be overwritten
        :param src: the relative path of the source file to move
        :param dst: the relative path of the destination file to move
        :return: True or False
        """

        abs_src = self.absolutize_path(src)
        if not os.path.isfile(abs_src):
            return False

        abs_dst = self.absolutize_path(dst)
        dst_dir = os.path.dirname(abs_dst)

        if not os.path.isdir(dst_dir):
            os.makedirs(dst_dir)

        self.observer.skip(abs_dst)
        try:
            move(abs_src, abs_dst)
        except IOError:
            return False

        self.client_snapshot[dst] = self.client_snapshot[src]
        self.client_snapshot.pop(src)
        self.update_local_dir_state(server_timestamp)
        return True

    def _sync_process(self, server_timestamp, server_dir_tree):
        # Makes the synchronization logic and returns a list of commands to launch
        # for server synchronization
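        # Decision table summarizing the branches below:
        #   dir modified   + local == server  -> push the local changes to the server
        #   dir modified   + local <  server  -> merge, deciding file by file on timestamps
        #   dir unmodified + local == server  -> already synchronized, nothing to do
        #   dir unmodified + local <  server  -> apply the server-side changes locally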

        def _filter_tree_difference(server_dir_tree):
            # process the local dir_tree and the server dir_tree
            # and classify the differences between them;
            # return a dict representing that classification, e.g.
            # { 'new_on_server': [<filepath>, ...],  # files on server, but not on client
            #   'modified':      [<filepath>, ...],  # files on both server and client, but different
            #   'new_on_client': [<filepath>, ...],  # files not on server, but on client
            # }
            client_files = set(self.client_snapshot.keys())
            server_files = set(server_dir_tree.keys())

            new_on_server = list(server_files.difference(client_files))
            new_on_client = list(client_files.difference(server_files))
            modified = []

            for filepath in server_files.intersection(client_files):
                # check files md5

                if server_dir_tree[filepath][1] != self.client_snapshot[filepath][1]:
                    modified.append(filepath)

            return {'new_on_server': new_on_server, 'modified': modified, 'new_on_client': new_on_client}

        def _check_md5(dir_tree, md5):
            result = []
            for k, v in dir_tree.iteritems():
                if md5 == v[1]:
                    result.append(k)
            return result

        local_timestamp = self.local_dir_state['last_timestamp']
        tree_diff = _filter_tree_difference(server_dir_tree)
        sync_commands = []

        if self._is_directory_modified():
            if local_timestamp == server_timestamp:
                print "local_timestamp == server_timestamp and directory IS modified"
                # simple case: the client has the command
                # it sends all folder modifications to server

                # files in server but not in client: remove them from server
                for filepath in tree_diff['new_on_server']:
                    sync_commands.append(('delete', filepath))
                    # self.conn_mng.dispatch_request('delete', {'filepath': filepath})

                # files modified in client: send modified files to server
                for filepath in tree_diff['modified']:
                    sync_commands.append(('modify', filepath))

                # files in client but not in server: upload them to server
                for filepath in tree_diff['new_on_client']:
                    sync_commands.append(('upload', filepath))
                    # self.conn_mng.dispatch_request('upload', {'filepath': filepath})

            else:  # local_timestamp < server_timestamp
                print "local_timestamp < server_timestamp and directory IS modified"
                assert local_timestamp <= server_timestamp, 'something went wrong in sync: ' \
                                                            'local_timestamp > server_timestamp'
                # the server has the command
                for filepath in tree_diff['new_on_server']:
                    file_timestamp, md5 = server_dir_tree[filepath]
                    existed_filepaths_on_client = _check_md5(self.client_snapshot, md5)
                    # If at least one path in client_snapshot has the same md5 as filepath, it means that in the past
                    # client_snapshot stored one or more files with the same md5 but different paths.

                    if existed_filepaths_on_client:
                        # it's a copy or a move
                        for path in existed_filepaths_on_client:
                            if path in tree_diff['new_on_client']:
                                if self._make_move_on_client(path, filepath, server_timestamp):
                                    tree_diff['new_on_client'].remove(path)
                                    break
                                else:
                                    self.stop(0, "move failed on in SYNC: src_path: {}, dest_path: {}".format(path,
                                                                                                              filepath))
                        # we haven't found files deleted on server so it's a copy
                        else:
                            if not self._make_copy_on_client(path, filepath, server_timestamp):
                                self.stop(0,
                                          "copy failed on in SYNC: src_path: {}, dest_path: {}".format(path, filepath))

                    # the daemon doesn't know filepath, so check whether file_timestamp is more recent than local_timestamp
                    else:
                        if file_timestamp > local_timestamp:
                            # the file on the server is more up to date
                            sync_commands.append(('download', filepath))
                            # self.conn_mng.dispatch_request('download', {'filepath': filepath})
                        else:
                            # the client has deleted the file, so delete it on server
                            sync_commands.append(('delete', filepath))
                            # self.conn_mng.dispatch_request('delete', {'filepath': filepath})

                for filepath in tree_diff['modified']:
                    file_timestamp, md5 = server_dir_tree[filepath]

                    if file_timestamp < local_timestamp:
                        # the client has modified the file, so update it on server
                        sync_commands.append(('modify', filepath))
                        # self.conn_mng.dispatch_request('modify', {'filepath': filepath})
                    else:
                        # it's the worst case:
                        # we have a conflict with the server:
                        # someone modified the file while the daemon was down and someone else modified
                        # the same file on the server
                        conflicted_path = ''.join([filepath, '.conflicted'])
                        self._make_copy_on_client(filepath, conflicted_path, server_timestamp)
                        sync_commands.append(('upload', conflicted_path))
                        # self.conn_mng.dispatch_request('upload', {'filepath': conflicted_path})

                for filepath in tree_diff['new_on_client']:
                    sync_commands.append(('upload', filepath))
                    # self.conn_mng.dispatch_request('upload', {'filepath': filepath})

        else:  # directory not modified
            if local_timestamp == server_timestamp:
                print "local_timestamp == server_timestamp and directory IS NOT modified"
                # it's the best case. Client and server are already synchronized
                return []
            else:  # local_timestamp < server_timestamp
                print "local_timestamp < server_timestamp and directory IS NOT modified"
                assert local_timestamp <= server_timestamp, 'something went wrong in sync: ' \
                                                            'local_timestamp > server_timestamp'
                # the server has the command
                for filepath in tree_diff['new_on_server']:
                    timestamp, md5 = server_dir_tree[filepath]
                    existed_filepaths_on_client = _check_md5(self.client_snapshot, md5)
                    # If at least one path in client_snapshot has the same md5 as filepath, it means that
                    # in the past client_snapshot stored one or more files with the same md5 but different paths.

                    if existed_filepaths_on_client:
                        # it's a copy or a move
                        for path in existed_filepaths_on_client:
                            if path in tree_diff['new_on_client']:
                                if self._make_move_on_client(path, filepath, server_timestamp):
                                    tree_diff['new_on_client'].remove(path)
                                    break
                                else:
                                    self.stop(0, "move failed on in SYNC: src_path: {}, dest_path: {}".format(path,
                                                                                                              filepath))
                        # we haven't found files deleted on server so it's a copy
                        else:
                            if not self._make_copy_on_client(path, filepath, server_timestamp):
                                self.stop(0,
                                          "copy failed on in SYNC: src_path: {}, dest_path: {}".format(path, filepath))
                    else:
                        # it's a new file
                        sync_commands.append(('download', filepath))
                        # self.conn_mng.dispatch_request('download', {'filepath': filepath})

                for filepath in tree_diff['modified']:
                    sync_commands.append(('download', filepath))
                    # self.conn_mng.dispatch_request('download', {'filepath': filepath})

                for filepath in tree_diff['new_on_client']:
                    # files that have been deleted on the server, so we have to delete them locally
                    abs_filepath = self.absolutize_path(filepath)
                    self.observer.skip(abs_filepath)
                    try:
                        os.remove(abs_filepath)
                    except OSError as e:
                        print "Delete EXEPTION INTO SYNC : {}".format(e)

                    self.client_snapshot.pop(filepath)
                    self.update_local_dir_state(server_timestamp)

        return sync_commands

    def sync_with_server(self):
        """
        Perform the synchronization with the server
        """
        response = self.conn_mng.dispatch_request('get_server_snapshot', '')
        if response is None:
            self.stop(1, '\nReceived None snapshot. Server down?\n')

        server_timestamp = response['server_timestamp']
        files = response['files']

        sync_commands = self._sync_process(server_timestamp, files)
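        # sync_commands is a list of (command, path) tuples, e.g.
        # [('download', 'docs/a.txt'), ('delete', 'old/b.txt')]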

        # Initialize the variable where we put the timestamp of the last operation we did
        last_operation_timestamp = server_timestamp

        # makes all synchronization commands
        for command, path in sync_commands:
            if command == 'delete':
                event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path})
                if event_timestamp:

                    last_operation_timestamp = event_timestamp['server_timestamp']
                    # If path is not inside client_snapshot, there is an inconsistency problem in client_snapshot!
                    if self.client_snapshot.pop(path, 'ERROR') == 'ERROR':
                        print 'Error during delete event IN SYNC! Impossible to find "{}" inside client_snapshot'\
                            .format(path)
                else:
                    self.stop(1,
                              'Error during connection with the server. Server failed to "delete" this file: {}'.format(
                                  path))

            elif command == 'modify' or command == 'upload':

                new_md5 = self.hash_file(self.absolutize_path(path))
                event_timestamp = self.conn_mng.dispatch_request(command, {'filepath': path, 'md5': new_md5})
                if event_timestamp:
                    last_operation_timestamp = event_timestamp['server_timestamp']
                else:
                    self.stop(1, 'Error during connection with the server. Server failed to "{}" this file: {}'.format(
                        command, path))

            else:  # command == 'download'
                print 'skipping download'
                self.observer.skip(self.absolutize_path(path))
                connection_result = self.conn_mng.dispatch_request(command, {'filepath': path})
                if connection_result:
                    print 'Downloaded file with path "{}" INTO SYNC'.format(path)
                    self.client_snapshot[path] = files[path]
                else:
                    self.stop(1,
                              'Error during connection with the server. Client failed to "download" this file: {}'.format(
                                  path))

        self.update_local_dir_state(last_operation_timestamp)

    def relativize_path(self, abs_path):
        """
        This function relativizes a path inside the folder watched by the daemon:
        for example: /home/user/watched/subfolder/ will become subfolder/
        """
        if abs_path.startswith(self.cfg['sharing_path']):
            relative_path = abs_path[len(self.cfg['sharing_path']) + 1:]
            return relative_path
        else:
            raise Exception('The path "{}" is not inside the sharing folder'.format(abs_path))

    def absolutize_path(self, rel_path):
        """
        This function absolutizes a path that was relativized before:
        for example: subfolder/ will become /home/user/watched/subfolder/
        """
        return os.path.join(self.cfg['sharing_path'], rel_path)
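
    # e.g. with self.cfg['sharing_path'] == '/home/user/sharing_folder':
    #   relativize_path('/home/user/sharing_folder/docs/a.txt') -> 'docs/a.txt'
    #   absolutize_path('docs/a.txt') -> '/home/user/sharing_folder/docs/a.txt'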

    def create_observer(self):
        """
        Create an instance of the watchdog Observer thread class.
        """
        self.observer = SkipObserver()
        self.observer.schedule(self, path=self.cfg['sharing_path'], recursive=True)

    # TODO: handle errors in the dictionary if the client_dispatcher misses required data!!
    # TODO: update the struct with a new, more performant data structure
    # TODO: verify what happens if the server returns an error message
    # ###################################

    def on_created(self, e):
        def build_data(cmd, rel_new_path, new_md5, founded_path=None):
            """
            Prepares the data from event handler to be delivered to connection_manager.
            """
            data = {'cmd': cmd}
            if cmd == 'copy':
                data['file'] = {'src': founded_path,
                                'dst': rel_new_path,
                                'md5': new_md5,
                                }
            else:
                data['file'] = {'filepath': rel_new_path,
                                'md5': new_md5,
                                }
            return data
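        # e.g. build_data('copy', 'docs/a.txt', '<md5>', 'docs/b.txt') returns
        # {'cmd': 'copy', 'file': {'src': 'docs/b.txt', 'dst': 'docs/a.txt', 'md5': '<md5>'}}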

        new_md5 = self.hash_file(e.src_path)
        rel_new_path = self.relativize_path(e.src_path)
        founded_path = self.search_md5(new_md5)

        # this check detects copy events
        if founded_path:
            print 'start copy'
            data = build_data('copy', rel_new_path, new_md5, founded_path)

        # this elif checks whether this 'created' event is actually a 'modified' event
        elif rel_new_path in self.client_snapshot:
            print 'start modified FROM CREATE!!!!!'
            data = build_data('modify', rel_new_path, new_md5)

        else:  # Finally we find a real create event!
            print 'start create'
            data = build_data('upload', rel_new_path, new_md5)

        # Send data to the connection manager dispatcher and check the return value.
        # If all goes right, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request(data['cmd'], data['file'])
        print 'event_timestamp of "{}" = {}'.format(data['cmd'], event_timestamp)
        if event_timestamp:
            self.client_snapshot[rel_new_path] = [event_timestamp, new_md5]
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Unable to connect to the server. Failed during "{0}" operation on "{1}" file'
                      .format(data['cmd'], e.src_path))

    def on_moved(self, e):

        print 'start move'
        rel_src_path = self.relativize_path(e.src_path)
        rel_dest_path = self.relativize_path(e.dest_path)
        # If rel_src_path is missing from client_snapshot, the snapshot is inconsistent!
        if self.client_snapshot.get(rel_src_path, 'ERROR') == 'ERROR':
            self.stop(1,
                      'Error during move event! Unable to find "{}" inside client_snapshot'.format(rel_src_path))
        md5 = self.client_snapshot[rel_src_path][1]
        data = {'src': rel_src_path,
                'dst': rel_dest_path,
                'md5': md5,
                }
        # Send data to the connection manager dispatcher and check the return value.
        # If all goes well, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('move', data)
        print 'event_timestamp of "move" =', event_timestamp
        if event_timestamp:
            self.client_snapshot[rel_dest_path] = [event_timestamp, md5]
            # rel_src_path is guaranteed to exist inside client_snapshot (checked above), so the pop result isn't checked
            self.client_snapshot.pop(rel_src_path)
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Unable to connect to the server. Failed during "move" operation on "{}" file'.format(
                e.src_path))

    def on_modified(self, e):

        print 'start modified'
        new_md5 = self.hash_file(e.src_path)
        rel_path = self.relativize_path(e.src_path)

        data = {'filepath': rel_path,
                'md5': new_md5
                }

        # Send data to the connection manager dispatcher and check the return value.
        # If all goes well, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('modify', data)
        if event_timestamp:
            print 'event_timestamp of "modified" =', event_timestamp
            self.client_snapshot[rel_path] = [event_timestamp, new_md5]
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Unable to connect to the server. Failed during "modify" operation on "{}" file'.format(
                e.src_path))

    def on_deleted(self, e):

        print 'start delete'
        rel_deleted_path = self.relativize_path(e.src_path)

        # Send data to the connection manager dispatcher and check the return value.
        # If all goes well, update client_snapshot and local_dir_state
        event_timestamp = self.conn_mng.dispatch_request('delete', {'filepath': rel_deleted_path})
        if event_timestamp:
            print 'event_timestamp of "delete" =', event_timestamp
            # If rel_deleted_path is missing from client_snapshot, the snapshot is inconsistent!
            if self.client_snapshot.pop(rel_deleted_path, 'ERROR') == 'ERROR':
                print 'Error during delete event! Unable to find "{}" inside client_snapshot'.format(
                    rel_deleted_path)
            self.update_local_dir_state(event_timestamp['server_timestamp'])
        else:
            self.stop(1, 'Unable to connect to the server. Failed during "delete" operation on "{}" file'.format(
                e.src_path))

    def _get_cmdmanager_request(self, socket):
        """
        Communicate with cmd_manager and get the request
        Returns the request decoded by json format or None if cmd_manager send connection closure
        """
        packet_size = socket.recv(Daemon.INT_SIZE)
        if len(packet_size) == Daemon.INT_SIZE:

            packet_size = int(struct.unpack('!i', packet_size)[0])
            packet = ''
            remaining_size = packet_size

            while len(packet) < packet_size:
                packet_buffer = socket.recv(remaining_size)
                if not packet_buffer:
                    # Peer closed the connection before the full packet arrived
                    return None
                remaining_size -= len(packet_buffer)
                packet = ''.join([packet, packet_buffer])

            req = json.loads(packet)
            return req
        else:
            return None

    def _set_cmdmanager_response(self, socket, message):
        """
        Makes cmd_manager response encoding it in json format and send it to cmd_manager
        """
        response = {'message': message}
        response_packet = json.dumps(response)
        socket.sendall(struct.pack('!i', len(response_packet)))
        socket.sendall(response_packet)
        return response_packet
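    # Client-side sketch of the same framing (illustrative, not part of the daemon):
    #   packet = json.dumps({'shutdown': ''})
    #   sock.sendall(struct.pack('!i', len(packet)))
    #   sock.sendall(packet)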

    def _initialize_observing(self):
        """
        Intial operation for observing.
        We create the client_snapshot, load the information stored inside local_dir_state and create observer.
        """
        self.build_client_snapshot()
        self.load_local_dir_state()
        self.create_observer()
        self.observer.start()
        self.sync_with_server()

    def _activation_check(self, s, cmd, data):
        """
        This method allow only registration and activation of user until this will be accomplished.
        In case of bad cmd this will be refused otherwise if the server response are successful
        we update the daemon_config and after activation of user start the observing.
        :param s: connection socket with client_cmdmanager
        :param cmd: received cmd from client_cmdmanager
        :param data: received data from client_cmdmanager
        """
        if cmd not in Daemon.ALLOWED_OPERATION:
            self._set_cmdmanager_response(s, 'Operation not allowed! Authorization required.')
        else:
            response = self.conn_mng.dispatch_request(cmd, data)
            if response['successful']:
                if cmd == 'register':
                    self.cfg['user'] = data[0]
                    self.cfg['pass'] = data[1]
                    self.update_cfg()
                elif cmd == 'activate':
                    self.cfg['activate'] = True
                    # Push the updated cfg into the connection manager
                    self.conn_mng.load_cfg(self.cfg)
                    self.update_cfg()
                    # The client_daemon is now ready to operate, so start observing
                    self._initialize_observing()
            self._set_cmdmanager_response(s, response)

    def start(self):
        """
        Starts the communication with the command_manager.
        """
        # If user is activated we can start observing.
        if self.cfg.get('activate', False):
            self._initialize_observing()

        TIMEOUT_LISTENER_SOCK = 0.5
        BACKLOG_LISTENER_SOCK = 1
        self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listener_socket.bind((self.cfg['cmd_address'], self.cfg['cmd_port']))
        self.listener_socket.listen(BACKLOG_LISTENER_SOCK)
        r_list = [self.listener_socket]
        self.daemon_state = 'started'
        self.running = 1
        polling_counter = 0
        try:
            while self.running:
                r_ready, w_ready, e_ready = select.select(r_list, [], [], TIMEOUT_LISTENER_SOCK)

                for s in r_ready:

                    if s == self.listener_socket:
                        # handle the server socket
                        client_socket, client_address = self.listener_socket.accept()
                        r_list.append(client_socket)
                    else:
                        # handle all other sockets
                        req = self._get_cmdmanager_request(s)

                        if req:
                            for cmd, data in req.iteritems():
                                if cmd == 'shutdown':
                                    self._set_cmdmanager_response(s, 'Daemon is shutting down')
                                    raise KeyboardInterrupt
                                else:
                                    if not self.cfg.get('activate', False):
                                        self._activation_check(s, cmd, data)
                                    else:  # client is already activated
                                        response = self.conn_mng.dispatch_request(cmd, data)
                                        # For now the protocol is that, for requests sent by the
                                        # command manager, the server replies with a string;
                                        # so, to keep the same data structure for daemon and
                                        # cmdmanager communications, we rebuild a JSON object
                                        # to send as the response
                                        # TODO: keep an eye on this assumption, or refactor the architecture
                                        self._set_cmdmanager_response(s, response)
                        else:  # the peer closed the connection (FIN received)
                            s.close()
                            r_list.remove(s)

                if self.cfg.get('activate', False):
                    # synchronization polling
                    # poll every 3 seconds: wait six select cycles (6 * 0.5s = 3 seconds)
                    # maybe optimizable, but functional for now
                    polling_counter += 1
                    if polling_counter == 6:
                        polling_counter = 0
                        self.sync_with_server()

        except KeyboardInterrupt:
            self.stop(0)
        if self.cfg.get('activate', False):
            self.observer.stop()
            self.observer.join()
        self.listener_socket.close()

    def stop(self, exit_status, exit_message=None):
        """
        Stop the Daemon components (observer and communication with command_manager).
        """
        if self.daemon_state == 'started':
            self.running = 0
            self.daemon_state = 'down'
            if self.local_dir_state:
                self.save_local_dir_state()
        if exit_message:
            print exit_message
        exit(exit_status)

    def update_local_dir_state(self, last_timestamp):
        """
        Update the local_dir_state with last_timestamp operation and save it on disk
        """

        self.local_dir_state['last_timestamp'] = last_timestamp
        self.local_dir_state['global_md5'] = self.md5_of_client_snapshot()
        self.save_local_dir_state()
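    # local_dir_state shape (see load_local_dir_state below):
    #   {'last_timestamp': <server timestamp of the last synced operation>,
    #    'global_md5': <md5 of the whole client_snapshot>}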

    def save_local_dir_state(self):
        """
        Save local_dir_state on disk
        """
        with open(self.cfg['local_dir_state_path'], "w") as state_file:
            json.dump(self.local_dir_state, state_file, indent=4)
        print "local_dir_state saved"

    def load_local_dir_state(self):
        """
        Load local dir state on self.local_dir_state variable
        if file doesn't exists it will be created without timestamp
        """

        def _rebuild_local_dir_state():
            self.local_dir_state = {'last_timestamp': 0, 'global_md5': self.md5_of_client_snapshot()}
            with open(self.cfg['local_dir_state_path'], "w") as state_file:
                json.dump(self.local_dir_state, state_file, indent=4)

        if os.path.isfile(self.cfg['local_dir_state_path']):
            with open(self.cfg['local_dir_state_path'], "r") as state_file:
                self.local_dir_state = json.load(state_file)
            print "Loaded local_dir_state"
        else:
            print "local_dir_state not found. Initialize new local_dir_state"
            _rebuild_local_dir_state()

    def md5_of_client_snapshot(self, verbose=0):
        """
        Calculate the md5 of the entire directory snapshot,
        with the md5 in client_snapshot and the md5 of full filepath string.
        :return is the md5 hash of the directory
        """

        if verbose:
            start = time.time()
        md5Hash = hashlib.md5()

        for path, time_md5 in sorted(self.client_snapshot.iteritems()):
            # time_md5[1] is already an md5 hex string, so no hexdigest is needed here
            if verbose:
                print path
            md5Hash.update(time_md5[1])
            md5Hash.update(path)

        if verbose:
            stop = time.time()
            print stop - start
        return md5Hash.hexdigest()
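    # Sketch of the global hash (illustrative snapshot with two entries):
    #   client_snapshot = {'b.txt': [ts, 'md5_b'], 'a.txt': [ts, 'md5_a']}
    #   digest = md5('md5_a' + 'a.txt' + 'md5_b' + 'b.txt').hexdigest()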

    def hash_file(self, file_path, chunk_size=1024):
        """
        :accept an absolute file path
        :return the md5 hash of received file
        """

        md5Hash = hashlib.md5()
        try:
            with open(file_path, 'rb') as f1:
                while 1:
                    # Read the file in small chunks to bound memory use
                    buf = f1.read(chunk_size)
                    if not buf:
                        break
                    md5Hash.update(buf)
            return md5Hash.hexdigest()
        except (OSError, IOError) as e:
            print e
            return None
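    # Usage sketch (illustrative path): self.hash_file('/home/user/watched/foo.txt')
    # returns a 32-char md5 hex digest, or None if the file cannot be read.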
class PreprocessIfileManager:
    WORK_MEM = 10240

    def __init__(self, connectionStr):
        self.connMgr = ConnectionManager()
        self.conn = self.connMgr.connectToDatabase(connectionStr)
        self.cur = self.conn.cursor()
        self.fdm = ForeignDataManager()
        self.cur.execute("set work_mem to %s", (self.WORK_MEM, ))
        #print("Preprocess IFile Manager Initialized.")

    def getCvIdOfTerm(self, term):
        self.cur.execute("select cv_id from cv where lower(term)=%s",
                         (term.lower(), ))
        cv_id = self.cur.fetchone()
        if cv_id is not None:
            return cv_id[0]
        else:
            return cv_id

    def getCvIdOfGroupAndTerm(self, group, term):
        self.cur.execute(
            "select cv_id from cv where lower(\"group\")=%s and lower(term)=%s",
            (group.lower(), term.lower()))
        cv_id = self.cur.fetchone()
        if cv_id is not None:
            return cv_id[0]
        else:
            return cv_id

    #select data_type from information_schema.columns where table_name='marker' and column_name='platform_id';
    def getTypeOfColumn(self, table, column):
        self.cur.execute(
            "select data_type from information_schema.columns where table_name=%s and column_name=%s",
            (table, column))
        res = self.cur.fetchone()
        if res is not None:
            return res[0]
        else:
            return res

    def getColumnListOfTable(self, table):
        self.cur.execute(
            "select column_name from information_schema.columns where table_name = %s",
            (table, ))
        res = self.cur.fetchall()
        return res

    def dropForeignTable(self, fdwTableName):
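        # psycopg2 placeholders bind values only, not identifiers, so the table
        # name must be interpolated here; callers should pass a trusted name.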
        self.cur.execute("drop foreign table if exists " + fdwTableName)

    def createForeignTable(self, iFile, fTableName):
        header, fdwScript = self.fdm.generateFDWScript(iFile, fTableName)
        #print("fdwScript: %s" % fdwScript)
        self.cur.execute(fdwScript)
        return header

    def createFileWithDerivedIdsV1(self, outputFilePath, derivedIdSql):
        copyStmt = "copy (" + derivedIdSql + ") to '" + outputFilePath + "' with delimiter E'\\t'" + " csv header;"
        #print("copyStmt = "+copyStmt)
        self.cur.execute(copyStmt)

    def createFileWithDerivedIds(self, outputFilePath, derivedIdSql):
        copyStmt = "copy (" + derivedIdSql + ") to STDOUT with delimiter E'\\t'" + " csv header;"
        with open(outputFilePath, 'w') as outputFile:
            # 20 KB read buffer (psycopg2's copy_expert default is 8192 bytes)
            self.cur.copy_expert(copyStmt, outputFile, 20480)

    def commitTransaction(self):
        self.conn.commit()

    def rollbackTransaction(self):
        self.conn.rollback()

    def closeConnection(self):
        self.connMgr.disconnectFromDatabase()
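
# Minimal usage sketch (connection string and terms are illustrative):
#   mgr = PreprocessIfileManager('host=localhost dbname=gobii user=gobii')
#   cv_id = mgr.getCvIdOfGroupAndTerm('germplasm_type', 'accession')
#   mgr.commitTransaction()
#   mgr.closeConnection()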
from background_monitor import BackgroundMonitor
from database import Database
from connection_manager import ConnectionManager
from config import SERVER_URL

if __name__ == '__main__':
    connection_manager = ConnectionManager(SERVER_URL)
    connection_manager.authorize()
    database = Database(connection_manager)

    background_monitor = BackgroundMonitor(database)
    background_monitor.run()
class TestConnectionManager(unittest.TestCase):

    def setUp(self):
        httpretty.enable()
        create_environment()
        make_fake_dir()
        with open(CONFIG_FILEPATH, 'r') as fo:
            self.cfg = json.load(fo)

        self.auth = (self.cfg['user'], self.cfg['pass'])
        self.base_url = ''.join([self.cfg['server_address'], self.cfg['api_suffix']])
        self.files_url = ''.join([self.base_url, 'files/'])
        self.actions_url = ''.join([self.base_url, 'actions/'])
        self.shares_url = ''.join([self.base_url, 'shares/'])
        self.user_url = ''.join([self.base_url, 'users/'])

        self.cm = ConnectionManager(self.cfg)

    def tearDown(self):
        httpretty.disable()
        httpretty.reset()
        remove_fake_dir()

    @httpretty.activate
    def test_register_user(self):
        """
        Test register user api:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        content = 'user created'
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=200, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertEqual(response['content'], content)
        self.assertTrue(response['successful'])

    @httpretty.activate
    def test_register_user_with_weak_password(self):
        """
        Test register user api with weak password:
        method = POST
        resource = <user>
        data = password=<password>
        """
        weak_password = '******'
        data = (USR, weak_password)
        url = ''.join((self.user_url, USR))
        content = {'type_of_improvement': 'improvement suggested'}
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.POST, url, status=403, body=content_jsoned)
        response = self.cm.do_register(data)
        self.assertIn('improvements', response)
        self.assertEqual(response['improvements'], content)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_register_user_with_already_existent_user(self):
        """
        Test register user api with already existent user:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        # This is the only case where the server doesn't send data along with the error message
        httpretty.register_uri(httpretty.POST, url, status=409)
        response = self.cm.do_register(data)
        self.assertIn('content', response)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_fail_to_register_user(self):
        """
        Test failed register request
        Test activate user api:
        method = POST
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = ''.join((self.user_url, USR))
        httpretty.register_uri(httpretty.POST, url, status=500)

        response = self.cm.do_register(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_activate_user(self):
        """
        Test successful activation
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = '6c9fb345c317ad1d31ab9d6445d1a820'
        data = (user, token)
        url = ''.join((self.user_url, user))
        answer = 'user activated'
        answer_jsoned = json.dumps(answer)
        httpretty.register_uri(httpretty.PUT, url, status=201, body=answer_jsoned)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], unicode)
        self.assertTrue(response['successful'])

    @httpretty.activate
    def test_activate_user_already_existent(self):
        """
        Test activate user already existent
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=409)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_activate_user_not_existent(self):
        """
        Test activate user not existent
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=404)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_fail_to_activate_user(self):
        """
        Test failed activation request
        Test activate user api:
        method = PUT
        resource = <user>
        data = activation_code=<token>
        """
        user = '******'
        token = 'bad_token'
        data = (user, token)
        url = ''.join((self.user_url, user))
        httpretty.register_uri(httpretty.PUT, url, status=500)

        response = self.cm.do_activate(data)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_login_user(self):
        """
        Test login user api:
        method = GET
        resource = <user>
        data = password=<password>
        """
        data = (USR, PW)
        url = self.files_url
        content = {'file1': 'foo.txt', 'file2': 'dir/foo.txt'}
        content_jsoned = json.dumps(content)
        httpretty.register_uri(httpretty.GET, url, status=200, body=content_jsoned)
        response = self.cm.do_login(data)
        self.assertIn('content', response)
        self.assertIsInstance(response['content'], str)
        self.assertTrue(response['successful'])

    @httpretty.activate
    def test_login_user_failed(self):
        """
        Test login user api with bad credentials:
        method = GET
        resource = <user>
        data = password=<password>
        """
        data = ('bad_user', 'bad_pass')
        url = self.files_url
        httpretty.register_uri(httpretty.GET, url, status=401)
        response = self.cm.do_login(data)
        self.assertIn('content', response)
        self.assertIsInstance(response['content'], str)
        self.assertFalse(response['successful'])

    @httpretty.activate
    def test_post_recover_password_not_found(self):
        """
        Test that if /users/<email>/reset POST == 404 then cm returns None
        """
        # An unknown user (neither registered nor pending) is a resource not found for the server...
        email = '*****@*****.**'
        url = self.user_url + email + '/reset'
        # ...so the server should return a 404:
        httpretty.register_uri(httpretty.POST, url, status=404)
        # and the command manager must return None in this case
        response = self.cm.do_reqrecoverpass(email)
        self.assertIsNone(response)

    @httpretty.activate
    def test_post_recover_password_accept(self):
        """
        Test that if /users/<email>/reset POST == 202 then cm returns True
        """
        email = '*****@*****.**'
        url = self.user_url + email + '/reset'
        httpretty.register_uri(httpretty.POST, url, status=202)
        response = self.cm.do_reqrecoverpass(email)
        self.assertTrue(response)

    @httpretty.activate
    def test_put_recover_password_not_found(self):
        """
        Test that if /users/<email> PUT == 404 then cm returns None
        """
        email = '*****@*****.**'
        recoverpass_code = os.urandom(16).encode('hex')
        new_password = '******'
        url = self.user_url + email
        httpretty.register_uri(httpretty.PUT, url, status=404)
        data = email, recoverpass_code, new_password
        response = self.cm.do_recoverpass(data)
        self.assertFalse(response)

    @httpretty.activate
    def test_put_recover_password_ok(self):
        """
        Test that if /users/<email> PUT == 200 then cm returns True
        """
        email = '*****@*****.**'
        recoverpass_code = os.urandom(16).encode('hex')
        new_password = '******'
        url = self.user_url + email
        httpretty.register_uri(httpretty.PUT, url, status=200)
        data = email, recoverpass_code, new_password
        response = self.cm.do_recoverpass(data)
        self.assertTrue(response)

    # files:
    @httpretty.activate
    def test_download_normal_file(self):
        url = ''.join((self.files_url, 'file.txt'))

        httpretty.register_uri(httpretty.GET, url, status=201)
        data = {'filepath': 'file.txt'}
        response = self.cm.do_download(data)
        self.assertEqual(response['successful'], True)

    @httpretty.activate
    def test_download_file_not_exists(self):
        url = ''.join((self.files_url, 'file.tx'))

        httpretty.register_uri(httpretty.GET, url, status=404)
        data = {'filepath': 'file.tx'}
        response = self.cm.do_download(data)
        self.assertEqual(response['successful'], False)
        self.assertIsInstance(response['content'], str)

    @httpretty.activate
    def test_do_upload_success(self):

        # prepare fake server
        url = ''.join((self.files_url, 'foo.txt'))
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        # call api
        response = self.cm.do_upload({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    @httpretty.activate
    def test_encode_of_url_with_strange_char(self):
        """
        Test the url encode of filename with strange char.
        I use upload method for example and i expect that httpretty answer at the right URL.
        """
        # Create the file with strange name
        strange_filename = 'name%with#strange~char'
        strange_filepath = os.path.join(TEST_SHARING_FOLDER, strange_filename)
        with open(strange_filepath, 'w') as f:
            f.write('file with strange name content')

        # prepare fake server
        encoded_filename = urllib.quote(strange_filename, self.cm.ENCODER_FILTER)
        url = ''.join((self.files_url, encoded_filename))
        print url
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        # call api
        response = self.cm.do_upload({'filepath': strange_filename, 'md5': 'test_md5'})
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    # actions:
    @httpretty.activate
    def test_do_move(self):
        url = ''.join((self.actions_url, 'move'))
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_move({'src': 'foo.txt', 'dst': 'folder/foo.txt'})
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    @httpretty.activate
    def test_do_delete(self):
        url = ''.join((self.actions_url, 'delete'))
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")
        d = {'filepath': 'foo.txt'}

        response = self.cm.do_delete(d)
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    @httpretty.activate
    def test_do_modify(self):
        url = ''.join((self.files_url, 'foo.txt'))
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.PUT, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_modify({'filepath': 'foo.txt', 'md5': 'test_md5'})
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    @httpretty.activate
    def test_do_copy(self):
        url = ''.join([self.actions_url, 'copy'])
        d = {'src': 'foo.txt', 'dst': 'folder/foo.txt'}
        msg = {'server_timestamp': time.time()}
        js = json.dumps(msg)
        httpretty.register_uri(httpretty.POST, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_copy(d)
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)

    @httpretty.activate
    def test_get_server_snapshot(self):
        url = self.files_url
        msg = {'files': 'foo.txt'}
        js = json.dumps(msg)

        httpretty.register_uri(httpretty.GET, url, status=201,
                               body=js,
                               content_type="application/json")

        response = self.cm.do_get_server_snapshot('')
        self.assertTrue(response['successful'])
        self.assertEqual(response['content'], msg)
class Master:

    def msg_callback(self, ch, method, properties, body):
        callback_set = {'SUCCESS': self.success,
                        'FAIL': self.fail,
                        'AWAKE': self.update_slave_response_time,
                        'STOP': self.stop,
                        'ADD_SLAVE': self.add_slave,
                        'KILL_SLAVE': self.kill_slave,
                        'RESTART_SLAVE': self.restart_slave,
                        'STAT': self.stat,
                        'START': self.start,
                        'RECONFIGURE': self.configure,
                        'REFRESH': self.refresh
                        }
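        # Messages are plain strings of the form '<COMMAND> <json-payload>';
        # the code below splits on the first space (start_popping_tasks publishes
        # 'WORK ' + ujson.dumps(task) in the same format).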
        try:
            command = body[:body.find(' ')]
            info = body[body.find(' ')+1:]
            if command in callback_set:
                callback_set[command](ujson.loads(info))
            else:
                logging.debug(" [x] Unknown command %r" % (str(body),))
        except KeyError as e:
            if str(e) == "'Queue.DeclareOk'":
                logging.debug("Queue.DelcareOk at %r" % (str(body),))
            else:
                logging.error("Unknown KeyError at %r:" % (str(body),))
        except RuntimeError as e:
            if 'recursion' in str(e):
                logging.error('MAXIMUM RECURSION ERROR')

    def __init__(self, conf_file):
        self.config = ConfigParser.ConfigParser(allow_no_value=True)
        self.clean_time_gap = None
        self.wait_time_for_slave = None
        self.master_queue_name = None
        self.task_queue_name = None
        self.task_queue_size_limit = None
        self.task_file_name = None
        self.task_counter_file = None
        self.ssh_key = None
        self.s3_bucket = None
        self.s3_folder = None
        self.slave_num_every_packup = None
        self.slave_max_sec_each_task = None
        self.slave_python_version = None
        self.master_ip = None
        self.slaves_ip = None
        self.slave_awake_frequency = None
        self.configure(conf_file)

        self.last_wake_time = None

        self.repeated_timer = None
        self.is_started = False
        self.pop_forever_handler = None

        logging.info('Starting task manager...')
        self.task_manager = TaskManager(self.task_file_name, self.task_counter_file)
        logging.info('Starting slave manager...')
        self.slave_manager = SlaveManager(master_ip=self.master_ip,
                                          slaves_ip=self.slaves_ip,
                                          ssh_key=self.ssh_key,
                                          s3_bucket=self.s3_bucket,
                                          s3_folder=self.s3_folder,
                                          slave_num_every_packup=self.slave_num_every_packup,
                                          slave_max_sec_each_task=self.slave_max_sec_each_task,
                                          slave_python_version=self.slave_python_version,
                                          slave_awake_frequency=self.slave_awake_frequency,
                                          slave_buffer_size=1)
        logging.info('Starting connection manager...')
        self.message_connection = ConnectionManager(queue_name=self.master_queue_name,
                                                    durable=False,
                                                    callback=self.msg_callback,
                                                    no_ack=True)

    def run(self):
        logging.info(' [*] Waiting for messages. To exit press CTRL+C')
        try:
            self.message_connection.start_accepting_message()
        except KeyboardInterrupt:
            logging.info('Stopping master...')
            self.stop(None)
        except EOFError:
            logging.info('Download finished. Shutting down master.')
            self.stop(None)
        # except Exception as e:
        #     logging.info(str(e))
        #     logging.info('Stopping master...')

    # TODO: write all configuration in one file
    def configure(self, conf_file):
        self.config.read(conf_file)
        self.clean_time_gap = self.config.getint('main', 'clean_time_gap')
        self.wait_time_for_slave = self.config.getint('main', 'wait_time_for_slave')
        self.slave_awake_frequency = self.config.get('main', 'slave_awake_frequency')
        self.master_ip = self.config.get('main', 'master_private_ip')
        self.slaves_ip = self.config.get('main', 'slaves_private_ip')
        self.master_queue_name = self.config.get('main', 'master_queue_name')
        self.task_queue_name = self.config.get('main', 'task_queue_name')
        self.task_file_name = self.config.get('main', 'task_file')
        self.task_queue_size_limit = int(self.config.get('main', 'task_queue_size_limit'))
        self.task_counter_file = self.config.get('main', 'task_counter_file')
        self.ssh_key = self.config.get('main', 'ssh_key')
        self.s3_bucket = self.config.get('main', 's3_bucket')
        self.s3_folder = self.config.get('main', 's3_folder')
        self.slave_num_every_packup = self.config.get('main', 'slave_num_every_packup')
        self.slave_max_sec_each_task = self.config.get('main', 'slave_max_sec_each_task')
        self.slave_python_version = self.config.get('main', 'slave_python_version')
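
    # Illustrative conf_file layout (key names match configure(); values are examples only):
    #   [main]
    #   clean_time_gap = 60
    #   wait_time_for_slave = 300
    #   slave_awake_frequency = 30
    #   master_private_ip = 10.0.0.1
    #   slaves_private_ip = 10.0.0.2,10.0.0.3
    #   master_queue_name = master
    #   task_queue_name = tasks
    #   task_file = tasks.txt
    #   task_queue_size_limit = 1000
    #   task_counter_file = counter.txt
    #   ssh_key = ~/.ssh/id_rsa
    #   s3_bucket = my-bucket
    #   s3_folder = downloads
    #   slave_num_every_packup = 10
    #   slave_max_sec_each_task = 300
    #   slave_python_version = 2.7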

    def add_slave(self, slave_info):
        if self.slave_manager.exist_slave(slave_info):
            logging.info('Slave ' + slave_info['host'] + ' already exists.')
            return
        logging.info('master: add slave ' + str(slave_info))
        new_slave_info = self.slave_manager.add_slave(slave_info)
        self.slave_manager.run_slave(new_slave_info)
        # TODO:

    def kill_slave(self, slave_info):
        if not self.slave_manager.exist_slave(slave_info):
            return
        logging.info('kill slave ' + str(slave_info))
        self.slave_manager.kill_slave(slave_info)

    def restart_slave(self, slave_info):
        logging.info(slave_info['host'])
        logging.info('restart_slave ' + str(slave_info))
        self.kill_slave(slave_info)
        self.add_slave(slave_info)

    def start(self, info):
        logging.info('Master Starts')
        self.last_wake_time = datetime.datetime.utcnow()
        self.is_started = True

        self.pop_forever_handler = threading.Thread(target=self.start_popping_tasks)
        self.pop_forever_handler.start()

        self.repeated_timer = RepeatedTimer(self.clean_time_gap, self.notice_refresh, None)

    def pop_forever(self):
        self.start_popping_tasks()

    def get_task_queue_size(self):
        pass

    # TODO: There is a bottleneck here
    def start_popping_tasks(self):
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        eof_reached = False
        while self.is_started and not eof_reached:
            current_task_queue_size = task_connection.get_task_queue_size()
            while self.is_started and current_task_queue_size < self.task_queue_size_limit:
                task = self.task_manager.pop_task()
                if task is None:
                    # TODO: Don't use Error. Just break and handle the case later in this function
                    logging.info('EOF Reached')
                    eof_reached = True
                    break
                message = 'WORK ' + ujson.dumps(task)
                task_connection.publish(message)
                current_task_queue_size += 1

        task_connection.stop()

    def fail(self, slave_task_info):
        self.task_manager.add_task(slave_task_info['task'])
        self.slave_manager.update_last_response(slave_task_info)

    def success(self, slave_task_info):
        slave_info = self.slave_manager.update_last_response(slave_task_info)

    def update_slave_response_time(self, slave_task_info):
        slave_info = self.slave_manager.update_last_response(slave_task_info)

    def stop(self, info):
        self.is_started = False
        self.notice_slaves_stop()
        if self.pop_forever_handler is not None:
            self.pop_forever_handler.join()
        if self.repeated_timer is not None:
            self.repeated_timer.stop()
        self.slave_manager.stop()
        self.task_manager.stop()
        self.message_connection.stop()

    def notice_slaves_stop(self):
        task_connection = ConnectionManager(queue_name=self.task_queue_name,
                                            durable=True, no_ack=False)
        # Publish one STOP message per known slave so each worker receives one
        screen_list = list(self.slave_manager.slave_dict.keys())
        for screen in screen_list:
            task_connection.publish('STOP {}')
        # task_connection.broadcast_task('STOP {}')
        task_connection.stop()

    def refresh(self, info):
        cur_progress, total_task = self.task_manager.get_progress()
        logging.info('downloading {}/{} files'.format(cur_progress, total_task))
        if not self.is_started:
            return

        # if time interval met, check failed slave
        if self.last_wake_time is None:
            self.last_wake_time = datetime.datetime.utcnow()

        if self.last_wake_time + datetime.timedelta(
                seconds=self.clean_time_gap) > datetime.datetime.utcnow():
            return
        failed_slaves = self.slave_manager.get_failed_slaves(self.wait_time_for_slave)
        if len(failed_slaves) != 0:
            logging.info('Found failed slaves: ' + str(failed_slaves))
        for slave in failed_slaves:
            self.restart_slave(slave)
        self.last_wake_time = datetime.datetime.utcnow()

    def notice_refresh(self, info):
        try:
            self.message_connection.publish('REFRESH {}')
        except IndexError:
            logging.critical('INDEX_ERROR')

    def stat(self, info):
        logging.info('=====================================')
        logging.info('Num of slaves: %s', self.slave_manager.get_num_slaves())
        logging.info('=====================================')
        if len(info) > 0:
            for slave in self.slave_manager.slave_list:
                if slave['last_response'] is None:
                    delta = 'new slave'
                else:
                    delta = datetime.datetime.utcnow() - slave['last_response']
                logging.info('%s | %s | %s', slave['host'], slave['queue'], delta)
            logging.info('====================================')