Example #1
    def gimmeTrame(self,daNewState):
        """
            Return the update trame to be sent
        """
        if (str(daNewState)=="toggle"):
            if self.current_state=="off":
                # set state to on
                rawTrame="A55A6B0570000000"+ "FF9F1E0"+"7" +"30"
                self.current_state="on"
            elif(self.current_state=="on"):
                # set state to off
                rawTrame="A55A6B0550000000"+ "FF9F1E0"+"7" +"30"
                self.current_state="off"
            else:
                rawTrame="A55A6B0550000000"+ "FF9F1E0"+"7" +"30"
                self.current_state="off"
        else:
            LOGGER.warn("Strange new state : {}. Trram not send".format(daNewState))

            return ""
        LOGGER.info("State after : {}".format(self.current_state))
        myTrame=Trame.trame(rawTrame)
        myTrame.calculateChecksum()
        LOGGER.info("Actuator trame generated, to be send : {}".format(myTrame.lessRawView()))
        self.save()
        return myTrame.rawView()
Example #2
def log_request(record):
    global hpclient
    req = json.dumps(record)
    LOGGER.info(req)

    if hpclient and record['is_shellshock']:
        hpclient.publish(app.config['hpfeeds.channel'], req)
Example #3
    def checkTrame(self):
        if self.trameUsed:
            LOGGER.debug("Trame received : {}".format(self.trameUsed.lessRawView()))
            if ("A55A" not in self.trameUsed.sep):
                LOGGER.warn("Wrong separator, rejected")

            if (self.doChecksum(self.trameUsed) not in self.trameUsed.checkSum):
                # bad checksum
                LOGGER.warning("Wrong checksum, expected : {}, rejected".format(self.doChecksum(self.trameUsed)))

            with self.lock:
                if (self.trameUsed.ident in self.identSet):
                    # fetch the sensor from the database
                    sensorUsed = sensor.Sensor.objects(physic_id=self.trameUsed.ident)[0]
                    newData = ''  # the new data to store in the database (dynamic type)
                    if (sensorUsed.__class__.__name__=="Switch"):
                        newData=sensorUsed.translateTrame(self.trameUsed)
                    elif (sensorUsed.__class__.__name__=="Temperature"):
                        newData = sensorUsed.translateTrame(self.trameUsed)

                    elif (sensorUsed.__class__.__name__=="Position"):
                        newData = sensorUsed.translateTrame(self.trameUsed)
                    else:
                        LOGGER.warning("Other sensor type (not handled yet)")
                    # update the value in the database
                    if newData :
                        sensorUsed.update(newData)
                        LOGGER.info(" Sensor {} ||New data {}".format(sensorUsed.physic_id, sensorUsed.current_state))
            self.trameUsed=''
Example #4
def log_request(record):
    global hpclient
    req = json.dumps(record)
    LOGGER.info(req)

    if hpclient and (record['is_shellshock'] or app.config['hpfeeds.only_exploits'].lower() == 'false'):
        hpclient.publish(app.config['hpfeeds.channel'], req)
Example #5
 def translateTrame(self,inTrame):
     """
     return the temperature (range 0-40 °C) from data byte 2
     """
     rawTemp = int(inTrame.data1, 16)
     temperature = round((rawTemp * 40 / 255.0), 3)
     LOGGER.info("Temperature sensor {} with temp {}".format(inTrame.ident, temperature))
     return temperature
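A quick worked instance of the conversion above, using a hypothetical raw data byte of 0x80:

raw_byte = int("80", 16)                       # hypothetical reading, 128 decimal
temperature = round(raw_byte * 40 / 255.0, 3)  # same 0-40 scaling as translateTrame
print(temperature)                             # 20.078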
Example #6
 def sendTrame(self,ident,newState):
     with self.lock:
         sensorUsed=sensor.Device.objects(physic_id=ident)[0]
     daTrame=sensorUsed.gimmeTrame(newState)
     if daTrame:
         self.soc.send(daTrame)
         LOGGER.info("Trame sended : {}".format(daTrame))
         return
Example #7
 def sendTrame(self,ident,newState):
 	"""
 		Ask the traductor to send a trame with the new state of a captor
 	"""
 	LOGGER.info("Lazily updating {} with {}".format(ident,newState))
 	self.idToUpdate=ident
 	self.newState=newState
 	self.save()
Example #8
 def updateOne(self,ident):
 	"""
 		Ask for update the sensor with this id
 	"""
 	LOGGER.info("lazily updating {}".format(ident))
 	self.idToUpdate=ident
 	self.newState=''
 	self.save()
Example #9
 def _import_listener(self, name, args):
     listener, source = utils.import_(name, 'listener')
     if not inspect.ismodule(listener):
         listener = listener(*args)
     elif args:
         raise DataError("Listeners implemented as modules do not take arguments")
     LOGGER.info("Imported listener '%s' with arguments %s (source %s)"
                 % (name, utils.seq2str2(args), source))
     return listener
Example #10
def web_request(program, url):
    LOGGER.info('Performing {} request on {}'.format(program, url))
    data = ''
    try:
        resp = requests.get(url, headers={'User-Agent': USER_AGENTS[program]})
        data = resp.text
    except Exception as e:
        LOGGER.error(e)
    return '{} {}'.format(program, url), data
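web_request looks up a module-level USER_AGENTS mapping that is not shown in this example; a plausible shape, with placeholder strings, would be:

USER_AGENTS = {
    'wget': 'Wget/1.21.2 (linux-gnu)',  # placeholder value
    'curl': 'curl/7.81.0',              # placeholder value
}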
Example #11
    def _email_config(self):
        try:
            self.mail_server = smtplib.SMTP('smtp.gmail.com', 587)
            self.mail_server.ehlo()
            self.mail_server.starttls()
            self.mail_server.login(self.gmail_user, self.gmail_password)

        except Exception as e:
            LOGGER.info("Failed to connnect. Error: {}".format(e))
            exit()
Example #12
 def call_method(self, method, *args):
     if self.is_java:
         args = [self._to_map(a) if isinstance(a, dict) else a for a in args]
     try:
         method(*args)
     except:
         message, details = utils.get_error_details()
         LOGGER.error("Calling listener method '%s' of listener '%s' failed: %s"
                  % (method.__name__, self.name, message))
         LOGGER.info("Details:\n%s" % details)
Example #13
 def get_page(self):
     if self.page is None:
         LOGGER.info('Fetching page contents from Confluence')
         data = self.server.getPage(
             self.get_token(),
             self.settings.namespace,
             self.settings.pagename
         )
         self.page = ConfluencePage(data)
     return self.page
Example #14
def DebugFile(path):
    if path == 'NONE':
        LOGGER.info('No debug file')
        return None
    try:
        LOGGER.info('Debug file: %s' % path)
        return _DebugFileWriter(path)
    except:
        LOGGER.error("Opening debug file '%s' failed and writing to debug file "
                     "is disabled. Error: %s" % (path, utils.get_error_message()))
        return None
Example #15
 def return_os_version_name(self):
     """
     Returns an os version name. Uses versions.py to verify os_name
     """
     os_names = []
     # Gathers a list of osx version keys
     for key in OSX_VERSIONS:
         os_names.append(key)
      # Takes those keys and loops over the dictionary
     for os_name in os_names:
         if self.return_os_version() in OSX_VERSIONS[os_name]:
             LOGGER.info('Returning OS Name')
             return os_name
Example #16
 def fetch_cookies(cls):
      LOGGER.info('get cookies from redis')
     r = redis.Redis(connection_pool=cls.redis_pool)
     while True:
         user = r.spop('users')
         r.sadd('users', user)
         c = r.hget('account', user)
         if c:
             user_cookies = c.decode('utf-8')
             cookies_json = json.loads(user_cookies)
             LOGGER.info('cookies got-------')
             return cookies_json
          LOGGER.warning('cookies not found')
Example #17
 def _import_listeners(self, listener_data):
     listeners = []
     for name, args in listener_data:
         try:
             listeners.append(_ListenerProxy(name, args))
         except:
             message, details = utils.get_error_details()
             if args:
                 name += ':' + ':'.join(args)
             LOGGER.error("Taking listener '%s' into use failed: %s"
                          % (name, message))
             LOGGER.info("Details:\n%s" % details)
     return listeners
Example #18
def log_request(record):
    global hpclient
    global dbh
    req = json.dumps(record)
    LOGGER.info(req)

    if hpclient and record['is_shellshock']:
        hpclient.publish(app.config['hpfeeds.channel'], req)
    if dbh and record['is_shellshock']:
        cursor = dbh.cursor()
        cursor.execute("INSERT INTO connections (method, url, path, query_string, headers, source_ip, source_port, dest_host, dest_port, is_shellshock, command, command_data, timestamp) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                    (record['method'], record['url'], record['path'], record['query_string'], str(record['headers']), record['source_ip'],request.environ.get('REMOTE_PORT'), record['dest_host'], record['dest_port'], record['is_shellshock'], record['command'], record['command_data'], record['timestamp']) )
        dbh.commit()
Example #19
    def stop(self):
        """Stop the example by closing the channel and connection. We
        set a flag here so that we stop scheduling new messages to be
        published. The IOLoop is started because this method is
        invoked by the Try/Catch below when KeyboardInterrupt is caught.
        Starting the IOLoop again will allow the publisher to cleanly
        disconnect from RabbitMQ.

        """
        LOGGER.info('Stopping')
        self._stopping = True
        self.close_channel()
        self.close_connection()
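The try/except the docstring refers to is not shown in this example. A sketch of such a run loop, modeled on pika's asynchronous publisher example (connect() and the attribute names are assumptions), could look like:

    def run(self):
        while not self._stopping:
            self._connection = None
            try:
                self._connection = self.connect()
                self._connection.ioloop.start()
            except KeyboardInterrupt:
                self.stop()
                if (self._connection is not None
                        and not self._connection.is_closed):
                    # restart the IOLoop so the close handshake can complete
                    self._connection.ioloop.start()
        LOGGER.info('Stopped')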
Example #20
    def enable_delivery_confirmations(self):
        """Send the Confirm.Select RPC method to RabbitMQ to enable delivery
        confirmations on the channel. The only way to turn this off is to close
        the channel and create a new one.

        When the message is confirmed from RabbitMQ, the
        on_delivery_confirmation method will be invoked passing in a Basic.Ack
        or Basic.Nack method from RabbitMQ that will indicate which messages it
        is confirming or rejecting.

        """
        LOGGER.info('Issuing Confirm.Select RPC command')
        self._channel.confirm_delivery(self.on_delivery_confirmation)
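A sketch of the confirmation callback this wires up, following pika's asynchronous publisher example (the _acked and _nacked counters are assumptions):

    def on_delivery_confirmation(self, method_frame):
        confirmation_type = method_frame.method.NAME.split('.')[1].lower()
        LOGGER.info('Received %s for delivery tag: %i',
                    confirmation_type, method_frame.method.delivery_tag)
        if confirmation_type == 'ack':
            self._acked += 1
        elif confirmation_type == 'nack':
            self._nacked += 1
        self._deliveries.remove(method_frame.method.delivery_tag)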
Example #21
    def setup_exchange(self, exchange_name):
        """
        :param str|unicode exchange_name: The name of the exchange to declare

        """
        LOGGER.info('Declaring exchange %s', exchange_name)
        # Note: using functools.partial is not required, it is demonstrating
        # how arbitrary data can be passed to the callback when it is called
        cb = functools.partial(self.on_exchange_declareok,
                               userdata=exchange_name)
        self._channel.exchange_declare(exchange=exchange_name,
                                       exchange_type=self.EXCHANGE_TYPE,
                                       callback=cb)
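The matching callback then receives the exchange name back via userdata; a minimal sketch in the same style (setup_queue and self.QUEUE are assumptions):

    def on_exchange_declareok(self, _unused_frame, userdata):
        LOGGER.info('Exchange declared: %s', userdata)
        self.setup_queue(self.QUEUE)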
Example #22
 def get_stats_for_all_matches(self):
     SQL = ("SELECT ID1_G, ID2_G, ID_T_G, DATE_G FROM (games_atp "
                 "INNER JOIN players_atp ON games_atp.ID1_G=players_atp.ID_P) "
                 "WHERE players_atp.COUNTRY_P<>'N/A' AND DATE_G IS NOT NULL "
                 "ORDER BY DATE_G DESC;")
     matches = self.execute_sql(SQL)
     dataset = []
     for m in matches:
         match = m
         match[3] = match[3].strftime(self.d_format)
         dataset.append(self.get_stats_for_match(*match))
     LOGGER.info("Collected data for %s matches.", len(dataset))
     return np.asarray(dataset)
Example #23
 def _import_listeners(self, listener_data):
     listeners = []
     for name, args in listener_data:
         try:
             listeners.append(_ListenerProxy(name, args))
         except:
             message, details = utils.get_error_details()
             if args:
                 name += ':' + ':'.join(args)
             LOGGER.error("Taking listener '%s' into use failed: %s" %
                          (name, message))
             LOGGER.info("Details:\n%s" % details)
     return listeners
Example #24
    def upload_items(self):
        """Uploads each item of the instances items list"""
        for item in self.transfer_files:
            r = item.upload()
            if not r:
                log = "Failed to upload item {0}".format(item)
                LOGGER.error(log)
                return False

            log = "Successfully uploaded item {0}".format(item)
            LOGGER.info(log)

        return True
Example #25
    def execute_commands(self, commands: List[str]):
        """Попытка выполнить команды из списка commands.

        * commands - > это список строк, каждая строка,
            представляет из себя UNIX команду
        """
        for command in commands:
            _, stdout, _ = self.client.exec_command(command)
            stdout.channel.recv_exit_status(
            )  # Помещаем только итоговоый результат
            response = stdout.readlines()
            for line in response:
                LOGGER.info(f"INPUT: {command} | OUTPUT: {line}")
Example #26
    def bulk_upload(self, files: List[str]):
        """Upload multiple files to a remote directory.

        :param files: List of local files to be uploaded.
        :type files: List[str]
        """
        try:
            self.scp.put(files, remote_path=self.remote_path)
            LOGGER.info(
                f"Finished uploading {len(files)} files to {self.remote_path} on {self.host}"
            )
        except SCPException as e:
            raise e
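Hypothetical usage, assuming remote is an instance of the class that owns bulk_upload and that remote_path and host were set at construction:

remote.bulk_upload([
    "./data/report.csv",
    "./data/summary.txt",
])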
Example #27
def compete(game, p1, p2, compete_round=100):
    compete_game = game()
    compete_game.add_player(p1)
    compete_game.add_player(p2)
    player_board = collections.defaultdict(int)
    for rd in range(compete_round):
        LOGGER.info("compete round[{}]".format(rd))
        compete_game.init_board()
        winner = compete_game.start()
        if winner is None:
            winner = "draw"
        player_board[winner] += 1
    LOGGER.info("compete result, player_board:{}".format(dict(player_board)))
Example #28
def get_hpfeeds_client(config):
    hpc = None
    if config["hpfeeds.enabled"].lower() == "true":
        LOGGER.info(
            "hpfeeds enabled, creating connection to {}:{}".format(config["hpfeeds.host"], config["hpfeeds.port"])
        )
        hpc = hpfeeds.new(
            config["hpfeeds.host"], int(config["hpfeeds.port"]), config["hpfeeds.identity"], config["hpfeeds.secret"]
        )
        hpc.s.settimeout(0.01)
    else:
        LOGGER.info("hpfeeds is disabled")
    return hpc
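A hypothetical config mapping with the keys get_hpfeeds_client reads (placeholder values, not real credentials):

config = {
    "hpfeeds.enabled": "true",
    "hpfeeds.host": "hpfeeds.example.org",
    "hpfeeds.port": "10000",
    "hpfeeds.identity": "ident",
    "hpfeeds.secret": "secret",
}
hpclient = get_hpfeeds_client(config)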
Example #29
 def train(self, game, fast_move_func):
     self.node_stack = []
     leaf_node = self.selection(self.root_node)
     is_finish, win_piece = game.judge(leaf_node.status)
     if not is_finish:
         action_list = game.get_action_list(leaf_node.status)
         self.expansion(node=leaf_node,
                        action_list=action_list,
                        transform_func=game.transform)
         win_piece = game.fast_finish(status=leaf_node.status,
                                      move_func=game.fast_move)
     LOGGER.info("{} win".format(win_piece))
     self.back_progression(win_piece)
Example #30
def main():
    with timer('load data'):
        df = pd.read_csv(TRAIN_PATH)[:10]
        df = df[df.Image != "ID_6431af929"].reset_index(drop=True)
        df.loc[df.pre_SOPInstanceUID == "ID_6431af929", "pre1_SOPInstanceUID"] = df.loc[
            df.pre_SOPInstanceUID == "ID_6431af929", "Image"]
        df.loc[df.post_SOPInstanceUID == "ID_6431af929", "post1_SOPInstanceUID"] = df.loc[
            df.post_SOPInstanceUID == "ID_6431af929", "Image"]
        df.loc[df.prepre_SOPInstanceUID == "ID_6431af929", "pre2_SOPInstanceUID"] = df.loc[
            df.prepre_SOPInstanceUID == "ID_6431af929", "pre1_SOPInstanceUID"]
        df.loc[df.postpost_SOPInstanceUID == "ID_6431af929", "post2_SOPInstanceUID"] = df.loc[
            df.postpost_SOPInstanceUID == "ID_6431af929", "post1_SOPInstanceUID"]
        y = df[TARGET_COLUMNS].values
        df = df[
            ["Image", "pre1_SOPInstanceUID", "post1_SOPInstanceUID", "pre2_SOPInstanceUID", "post2_SOPInstanceUID"]]
        ids = df["Image"].values
        gc.collect()

    with timer('preprocessing'):
        test_augmentation = Compose([
            CenterCrop(512 - 50, 512 - 50, p=1.0),
            Resize(img_size, img_size, p=1)
        ])

        test_dataset = RSNADatasetTest(df, img_size, IMAGE_PATH, id_colname=ID_COLUMNS,
                                    transforms=test_augmentation, black_crop=False, subdural_window=True,
                                    pick_type="pre_pre", n_tta=N_TTA)
        test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
        del df, test_dataset
        gc.collect()

    with timer('create model'):
        model = CnnModel(num_classes=N_CLASSES, encoder="se_resnext50_32x4d", pretrained="imagenet", pool_type="avg")
        model.load_state_dict(torch.load(model_path))
        model.to(device)
        model = torch.nn.DataParallel(model)

    with timer('predict'):
        pred = predict(model, test_loader, device, n_tta=N_TTA)
        pred = np.clip(pred, 1e-6, 1-1e-6)

    with timer('sub'):
        sub = pd.DataFrame(pred, columns=TARGET_COLUMNS)
        sub["ID"] = ids
        sub = sub.set_index("ID")
        sub = sub.unstack().reset_index()
        sub["ID"] = sub["ID"] + "_" + sub["level_0"]
        sub = sub.rename(columns={0: "Label"})
        sub = sub.drop("level_0", axis=1)
        LOGGER.info(sub.head())
        sub.to_csv("../output/{}_train.csv".format(EXP_ID), index=False)
Example #31
def main():
    args = argument_parser(TOOLS).parse_args()
    log.setLevel(LOG_LEVELS[args.loglevel])
    cdb = None
    ret = 0
    processcdb_config = ConfigParser()

    if args.dumpconfigs:
        for tool_name in TOOLS:
            tool = TOOLS[tool_name](tool_name)
            processcdb_config[tool_name] = tool.default_config()
        config_file = args.config.absolute()
        config_file.parent.mkdir(parents=True, exist_ok=True)
        if config_file.exists():
        log.warning(
                f"Configuration file {config_file} already exists, overwriting"
            )
        with config_file.open("w") as output:
            processcdb_config.write(output)
        log.info(f"Configuration file written to {config_file}")
        sys.exit(0)

    processcdb_config.read(args.config)
    try:
        tool = TOOLS[args.tool](args.tool, processcdb_config)
    except KeyError:
        log.error(f"Unknown tool '{args.tool}' - cant initilize")
        return 127

    if args.cdb.is_file():
        cdb = json.loads(args.cdb.read_text())
        if cdb:
            #if args.commit_a is not None:
            #cdb = filterByChangelist(cdb, (args.commit_a, args.commit_b))

            if not args.allow_dupes:
                cdb = remove_dupes(cdb)

            try:
                ret = tool.execute(cdb, args)
                log.info(f"Return value from tool process: {ret}")
            except EnvironmentError as e:
                log.error(f"Cant process: {e}")
            except Exception as e:
                log.error(f"{e}")
                traceback.print_exc()
        else:
            log.error(f"File '{args.cdb}' is empty")
    else:
        log.error(f"File '{args.cdb}' does not exist")
    return ret  # TODO: Add proper return codes
Example #32
def add_downloaded_image(num, link, pdf):
    """
    Download image, add it to PDF file and delete it
    """
    LOGGER.info(f'Download image № {num} from {link} from received URL.')

    filename, headers = urllib.request.urlretrieve(link)
    image_format = headers['content-type'].replace('image/', '')

    if image_format not in ('jpeg', 'png'):
        LOGGER.info(
            f"Image № {num} from {link} is not in an appropriate format.")
        add_image_link(num, link, pdf)
    else:
        LOGGER.info(f"Format of image № {num} from {link} is appropriate.")

        pdf.image(filename,
                  x=70,
                  y=pdf.get_y(),
                  h=40,
                  type=image_format,
                  link=link)
        pdf.ln(40)

        LOGGER.info(f'Delete downloaded image № {num} from {link}.')
        os.remove(filename)
Example #33
def main():
    with timer('load data'):
        df = pd.read_csv(TRAIN_PATH)
        df = df[df.Image != "ID_6431af929"].reset_index(drop=True)
        y = df[TARGET_COLUMNS].values
        df = df[["Image"]]
        gc.collect()

    with timer('preprocessing'):
        train_augmentation = Compose([
            CenterCrop(512 - 50, 512 - 50, p=1.0),
            HorizontalFlip(p=0.5),
            OneOf([
                ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            ], p=0.5),
            Rotate(limit=30, border_mode=0, p=0.7),
            Resize(img_size, img_size, p=1)
        ])

        train_dataset = RSNADataset(df, y, img_size, IMAGE_PATH, id_colname=ID_COLUMNS,
                                    transforms=train_augmentation, black_crop=False, subdural_window=True,
                                    conc_type="concat_all", conc_type2="concat_prepost")
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
        del df, train_dataset
        gc.collect()

    with timer('create model'):
        model = CnnModel(num_classes=N_CLASSES, encoder="se_resnext50_32x4d", pretrained="imagenet", pool_type="avg")
        if model_path is not None:
            model.load_state_dict(torch.load(model_path))
        model.to(device)

        criterion = torch.nn.BCEWithLogitsLoss(weight=torch.FloatTensor([2, 1, 1, 1, 1, 1]).cuda())
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, eps=1e-4)
        model = torch.nn.DataParallel(model)

    with timer('train'):
        for epoch in range(1, epochs + 1):
            if epoch == 5:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = param_group['lr'] * 0.1
            seed_torch(SEED + epoch)

            LOGGER.info("Starting {} epoch...".format(epoch))
            tr_loss = train_one_epoch(model, train_loader, criterion, optimizer, device)
            LOGGER.info('Mean train loss: {}'.format(round(tr_loss, 5)))

            torch.save(model.module.state_dict(), 'models/{}_ep{}.pth'.format(EXP_ID, epoch))
Example #34
    def tap_read_cb(self, data):
        LOGGER.debug("MainControl tap_read_cb")

        # dns filter
        if is_dns_packet(data):
            LOGGER.debug("MainControl read dns packet")
            qnames = get_dns_qnames(data)
            for qname in qnames:
                if self.filter.match_domain(qname.decode()):
                    LOGGER.info("DNSServer domain matched: %s" % qname)
                    self.dns_server.resolve(data)
                    return

        self.client.send(data)
Example #35
def get_hpfeeds_client(config):
    hpc = None
    if config['hpfeeds.enabled'].lower() == 'true':
        LOGGER.info('hpfeeds enabled, creating connection to {}:{}'.format(config['hpfeeds.host'], config['hpfeeds.port']))
        hpc = hpfeeds.new(
            config['hpfeeds.host'], 
            int(config['hpfeeds.port']), 
            config['hpfeeds.identity'], 
            config['hpfeeds.secret']
        )
        hpc.s.settimeout(0.01)
    else:
        LOGGER.info( 'hpfeeds is disabled')
    return hpc
Example #36
 def return_home_dirs(self):
     """
     Returns a list of home directories.
     """
     # This will gather a list of home directories.
     directories = []
     path = '/Users'
     os.chdir(path)
     home_dirs = os.listdir(path)
     for directory in home_dirs:
         if not directory.startswith('.'):
             directories.append(directory)
     LOGGER.info('Returning home directories')
     return directories
Example #37
 def return_bash_history(self):
     """
     Returns a list of bash history
     """
     bash_history = []
     history = self.shell_cmd('history')
     try:
         if history:
             for line in history:
                 bash_history.append(line)
         LOGGER.info('Returning bash history')
         return bash_history
     except OSError:
         LOGGER.error('OSError')
Example #38
def main(seed):
    with timer('load data'):
        df = pd.read_csv(FOLD_PATH)

    with timer('preprocessing'):
        val_df = df[df.fold_id == FOLD_ID]
        val_augmentation = None

        val_dataset = SeverDatasetTest(val_df,
                                       IMG_DIR,
                                       IMG_SIZE,
                                       N_CLASSES,
                                       id_colname=ID_COLUMNS,
                                       transforms=val_augmentation)
        val_loader = DataLoader(val_dataset,
                                batch_size=BATCH_SIZE,
                                shuffle=False,
                                num_workers=8)

        del val_df, df, val_dataset
        gc.collect()

    with timer('create model'):
        models = []
        model = smp_old.Unet('resnet34',
                             encoder_weights="imagenet",
                             classes=N_CLASSES,
                             encoder_se_module=True,
                             decoder_semodule=True,
                             h_columns=False,
                             skip=True,
                             act="swish",
                             freeze_bn=True,
                             classification=CLASSIFICATION)
        model = convert_model(model)
        if base_model is not None:
            model.load_state_dict(torch.load(base_model))
        model.to(device)
        models.append(model)

    with timer('predict'):
        rles, sub_ids = predict(models, val_loader, device)
        sub_df = pd.DataFrame({
            'ImageId_ClassId': sub_ids,
            'EncodedPixels': rles
        })
        LOGGER.info(sub_df.head())

        sub_df.to_csv('{}_{}.csv'.format(EXP_ID, FOLD_ID), index=False)
Example #39
def perform_commands(headers):
    for name, value in headers:
        mat = ping_check_re.search(value)
        if mat:
            # do ping
            ping = mat.groupdict()
            # don't do more than 20 pings
            count = min(20, int(ping.get('count', 1)))
            host = ping['host']
            LOGGER.info('Performing {} pings against {}'.format(count, host))

            # host must match an IP regex and count must be a number, prevents command injection here
            command = ['ping', '-n', '-c', str(count), host]
            try:
                subprocess.call(command)
            except Exception as e:
                LOGGER.error(e)
            return ' '.join(command), ''
            
        mat = wget_check_re.search(value)
        if mat:
            return web_request('wget', mat.groupdict()['url'])

        mat = wget_check_re2.search(value)
        if mat:
            return web_request('wget', 'http://'+mat.groupdict()['url'])

        mat = curl_check_re.search(value)
        if mat:
            return web_request('curl', mat.groupdict()['url'])

        mat = curl_check_re2.search(value)
        if mat:
            return web_request('curl', 'http://'+mat.groupdict()['url'])

        mat = telnet_check_re.search(value)
        if mat:
            telnet = mat.groupdict()
            try:
                host = telnet['host']
                port = telnet['port']
                LOGGER.info('Opening socket to {}:{}'.format(host, port))
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((host, int(port)))
                s.close()
            except Exception as e:
                LOGGER.error(e)
            return 'telnet {}:{}'.format(host, port), ''
    return None, None
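The regexes used above are defined elsewhere; a hypothetical shape for ping_check_re that supplies the 'count' and 'host' groups the code reads, and keeps host restricted to an IPv4 literal as the command-injection comment requires, could be:

import re

ping_check_re = re.compile(
    r'ping\s+-c\s*(?P<count>\d{1,2})\s+'
    r'(?P<host>\d{1,3}(?:\.\d{1,3}){3})'
)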
Example #40
def perform_commands(headers):
    for name, value in headers:
        mat = ping_check_re.search(value)
        if mat:
            # do ping
            ping = mat.groupdict()
            # don't do more than 20 pings
            count = min(20, int(ping.get('count', 1)))
            host = ping['host']
            LOGGER.info('Performing {} pings against {}'.format(count, host))

            # host must match an IP regex and count must be a number, prevents command injection here
            command = ['ping', '-n', '-c', str(count), host]
            try:
                subprocess.call(command)
            except Exception as e:
                LOGGER.error(e)
            return ' '.join(command), ''

        mat = wget_check_re.search(value)
        if mat:
            return web_request('wget', mat.groupdict()['url'])

        mat = wget_check_re2.search(value)
        if mat:
            return web_request('wget', 'http://' + mat.groupdict()['url'])

        mat = curl_check_re.search(value)
        if mat:
            return web_request('curl', mat.groupdict()['url'])

        mat = curl_check_re2.search(value)
        if mat:
            return web_request('curl', 'http://' + mat.groupdict()['url'])

        mat = telnet_check_re.search(value)
        if mat:
            telnet = mat.groupdict()
            try:
                host = telnet['host']
                port = telnet['port']
                LOGGER.info('Opening socket to {}:{}'.format(host, port))
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((host, int(port)))
                s.close()
            except Exception as e:
                LOGGER.error(e)
            return 'telnet {}:{}'.format(host, port), ''
    return None, None
Example #41
def train_one_epoch(model,
                    train_loader,
                    criterion,
                    optimizer,
                    device,
                    accumulation_steps,
                    total_step,
                    n_labels,
                    base_lr,
                    steps_upd_logging=500,
                    gamma=None):
    model.train()
    optimizer.zero_grad()

    total_loss = 0.0
    train_losses = []
    for step, (features, targets) in enumerate(train_loader):
        features = trim_tensors(features)
        features, targets = features.to(device), targets.to(device)
        logits = model(features)

        if n_labels == 1:
            loss = criterion(logits.view(-1, 1), targets.view(-1, 1))
        else:
            loss = criterion(logits, targets)
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()

        if (step + 1
            ) % accumulation_steps == 0:  # Wait for several backward steps
            optimizer.step()  # Now we can do an optimizer step
            optimizer.zero_grad()

        if gamma is not None and step == int(total_step / 2):
            for param_group in optimizer.param_groups:
                param_group['lr'] = base_lr * gamma

        if step == 80000:
            return total_loss / (step + 1), train_losses

        total_loss += loss.item()

        if (step + 1) % steps_upd_logging == 0:
            train_losses.append(total_loss / (step + 1))
            LOGGER.info(
                f'Train loss on step {step + 1} was {round(total_loss / (step + 1), 5)}'
            )

    return total_loss / (step + 1), train_losses
Example #42
 def find_ip(self):
     arg = 'ip route list'
     # time to get IP varies on each reboot, so try to find IP continuously
     while (True):
         try:
             p = subprocess.Popen(arg, shell=True, stdout=subprocess.PIPE)
             data = p.communicate()
             split_data = data[0].split()
             self.ip_addr = split_data[split_data.index('src') + 1]
             self.ext_ipaddr = urllib2.urlopen("http://icanhazip.com").read().strip()
             break
         except:
             continue  
     
      LOGGER.info('Local IP: {}'.format(self.ip_addr))
Example #43
def log_email(msg, error=False):

    if error:
        sys.stderr.write(f"{msg}\n")
        # send_email(
        #     config.err_email_list,
        #     body,
        #     subject="Archiver Error",
        #     to="*****@*****.**",
        # )
        LOGGER.error(msg)
    else:
        LOGGER.info(msg)
    print(msg)
    return
Example #44
 def stop(self):
     LOGGER.info("Controller stop")
     for _, value in self.id_to_server.items():
         value.stop()
     self.running = False
     if self.recv_thread is not None:
         while self.recv_thread.is_alive():
             time.sleep(1)
     self.sock.close()
     if self.timeout_thread is not None:
         while self.timeout_thread.is_alive():
             time.sleep(1)
     if self.handle_traffic_thread is not None:
         while self.handle_traffic_thread.is_alive():
             time.sleep(1)
Example #45
 def __init__ (self) :
     """
         Create things for synchro, get the sensors from the DB
     """
      self.lock = threading.Lock()  # lock used for DB updates
     self.soc = socket.socket()
     self.stoppedAnalyze=False
     self.trameUsed = ''
     self.running=True
     self.identSet = set([])
     lazzyUpdate.drop_collection()
     LOGGER.info("initializing sensors set : ")
     for lsensor in sensor.Sensor.objects:
          self.identSet.add(lsensor.physic_id)
          LOGGER.info(lsensor.physic_id)
Example #46
def get_last_model(model_name, from_step=False):
    LOGGER.debug(f'Model checkpoint directory: {os.path.join(constants.CHECKPOINT_DIR, model_name)}')
    if from_step:
        model_paths = glob(os.path.join(constants.CHECKPOINT_DIR, model_name) + '/train_step-*.model')
        LOGGER.debug(f'Model paths: {model_paths}')
        last_step = max([int(os.path.basename(mp)[len('train_step-'):-len('.model')]) for mp in model_paths])
        last_model_path = os.path.join(constants.CHECKPOINT_DIR, model_name, f'train_step-{last_step}.model')
    else:
        model_paths = glob(os.path.join(constants.CHECKPOINT_DIR, model_name) + '/[!train_step-]*.model')
        LOGGER.debug(f'Model paths: {model_paths}')
        last_iter = max([int(os.path.basename(mp)[: -len('.model')]) for mp in model_paths])
        last_model_path = os.path.join(constants.CHECKPOINT_DIR, model_name, f'{str(last_iter).zfill(6)}.model')
    LOGGER.info(f'Loading {last_model_path} (last)')
    model = torch.load(last_model_path)
    return model
Example #47
 def get_stories_data(self):
     result = []
     for row in self.soup.findAll('tr')[1:]:
         if len(row.findAll('td')) != 12:
             fail('column count is incorrect')
         cols = row.findAll('td')
         if cols[2].find('a') is None:
             fail('story key is not a hyperlink')
         else:
             result.append({
                 'key': str(cols[2].find("a").text),
                 'status': str(cols[6].text.encode('ascii', 'ignore'))
             })
     LOGGER.info('Table contains %s stories' % len(result))
     return result
Example #48
    def __init__(self, train_df, demand_non_empty_proba):
        assert demand_non_empty_proba > 0, 'frequency of non-empty images must be greater than zero'
        self.positive_proba = demand_non_empty_proba

        self.train_df = train_df.reset_index(drop=True)

        self.positive_idxs = self.train_df[
            self.train_df.sum_target != 0].index.values
        self.negative_idxs = self.train_df[self.train_df.sum_target ==
                                           0].index.values

        self.n_positive = self.positive_idxs.shape[0]
        self.n_negative = int(self.n_positive * (1 - self.positive_proba) /
                              self.positive_proba)
        LOGGER.info("len data = {}".format(self.n_positive + self.n_negative))
Example #49
    def publish_message(self, message):
        if self._channel is None or not self._channel.is_open:
            LOGGER.warning('Channel is not open, could not publish event %s',
                           json.dumps(message, cls=UUIDEncoder))
            return

        properties = pika.BasicProperties(content_type='application/json')

        self._channel.basic_publish(
            self.EXCHANGE, self.ROUTING_KEY,
            json.dumps(message, ensure_ascii=False, cls=UUIDEncoder),
            properties)
        self._message_number += 1
        self._deliveries.append(self._message_number)
        LOGGER.info('Published message # %i', self._message_number)
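publish_message serializes with a UUIDEncoder that is not part of this example; a plausible sketch is a json.JSONEncoder subclass that renders UUID values as strings:

import json
import uuid


class UUIDEncoder(json.JSONEncoder):
    def default(self, obj):
        # serialize UUID values as plain strings, defer everything else
        if isinstance(obj, uuid.UUID):
            return str(obj)
        return super().default(obj)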
Example #50
    def execute(self, cdb, args):
        result = 1
        if not self.tool_exists():
            raise EnvironmentError(f"tool: {self.tool_name} not in path, cannot execute.")

        command_queue = self._generate_cmd_queue(cdb, args)
        try:
            tmp_dir = None
            if args.output is not None:
                tmp_dir = Path(tempfile.mkdtemp())

            tasks = self.max_tasks(args, len(command_queue))
            global list_of_futures
            signal.signal(signal.SIGINT, inthandler)

            log.info("Starting scanning ...")
            with concurrent.futures.ProcessPoolExecutor(tasks) as executor:
                for cmd in command_queue:
                    future = executor.submit(self.process_queue, cmd, tmp_dir)
                    future.add_done_callback(partial(process_cb, cmd))
                    list_of_futures.append(future)

            log.info("Waiting for scanning processes to finish ..")
            concurrent.futures.wait(list_of_futures)
            log.info("Scanning done ...")

            if args.output is not None:
                log.debug(f"User requested output to {args.output}, scanning log files and saving ...")
                with open(args.output, "w") as dst:
                    for name in tmp_dir.glob("*.log"):
                        dst.write(name.read_text())

                if args.xml:
                    log.debug("User requested output as xml, converting ...")
                    self.format_output_to_xml(args.output, args.allow_dupes)
                shutil.rmtree(tmp_dir)
            log.info("All done")
            result = 0
        except KeyboardInterrupt:
            log.debug("Keyboard interrupt!")
            if tmp_dir is not None:
                shutil.rmtree(tmp_dir)
            os.kill(0, 9)
        except Exception as e:  # TODO: Add proper exception handling
            log.error(f"Exception: {e} .-- ")
            result = 1
        log.info("All done")
        return result
Example #51
def anonymize_csv_wrapper(input_csv, output_folder, anon_csv_name, hash_df,
                          dataset):
    if not os.path.exists(output_folder):
        try:
            os.makedirs(output_folder)
            LOGGER.info('Output directory %s is created' % output_folder)
        except OSError:
            LOGGER.warning('Creation of the output directory %s failed' %
                           output_folder)
    if hash_df == 'sha224':
        hash_method = 'sha3'
    elif hash_df == 'md5':
        hash_method = 'md5'
    output_path = os.path.join(output_folder, anon_csv_name)
    anonymize_csv(input_csv, output_path, columns=[0], method=hash_method)
    LOGGER.info('Anonymized csv is saved in %s' % output_path)
Example #52
 def translateTrame(self,inTrame):
     """
     return  close if data0=09, 
             open if data0=08 
             else nothing
     """
     if (inTrame.data0=='09'):
                     LOGGER.info("Door sensor {} with state [close]".format(inTrame.ident))
                     dataToRet = "close"
     elif(inTrame.data0=='08'):
         LOGGER.info("Door sensor {} with state [open]".format(inTrame.ident))
         dataToRet = "open"
     else:
         LOGGER.warn("Door sensor {}Strange state : {}".format(inTrame.ident, inTrame.data2))
         dataToRet=''
     return dataToRet
Example #53
 def launch(self,addr,port):
     self.connect(addr,port)
     dacount=0
     self.updateIdentSet()
     while self.running:
         try:
              if dacount > 200:  # database refresh interval
                 self.updateIdentSet()
                 dacount=0
             try:
                 self.receive()
             except IOError:
                 # timeout !
                 pass
             if self.trameUsed:
                 self.checkTrame()
             dacount+=1
             # LOGGER.debug("tic")
         except KeyboardInterrupt:
             sys.exit(0)
     LOGGER.info("Le traducteur est terminé")
Example #54
 def updateIdentSet(self):
     """
         Safely update the identifier set of the traductor
     """
     for anUpdate in lazzyUpdate.objects:
         LOGGER.warn("id : {} || state : {}".format(anUpdate.idToUpdate,anUpdate.newState))
         if(anUpdate.idToUpdate==""):
             with self.lock:
                 self.identSet=set([])
                 for lsensor in sensor.Sensor.objects:
                     self.identSet.add(lsensor.physic_id)
                     LOGGER.info(lsensor.physic_id)
                 LOGGER.info("Traductor's set of captors updated")
         elif(anUpdate.newState==""):
             with self.lock:
                  if any(anUpdate.idToUpdate == things.physic_id for things in sensor.Sensor.objects):
                     self.identSet.add(anUpdate.idToUpdate)
                     LOGGER.info("{} added".format(anUpdate.idToUpdate))
         else:
             #send a trame from a captor with a newState
             LOGGER.error("Sensor to update : {} ||new state : {}".format(anUpdate.idToUpdate,anUpdate.newState))
             self.sendTrame(anUpdate.idToUpdate,anUpdate.newState)
         anUpdate.delete()
         LOGGER.warn(" {} update           GROS delete de : {} || {}".format(lazzyUpdate.objects.count(),anUpdate.idToUpdate,anUpdate.newState))
         return 
     LOGGER.debug("nothing to update")
Example #55
def get_postgresql_handler(config):
    dbh = None
    if config["postgresql.enabled"].lower() == "true":
        LOGGER.info(
            "postgresql enabled, creating connection to {}:{}".format(
                config["postgresql.host"], config["postgresql.port"]
            )
        )
        dbh = psycopg2.connect(
            database=config["postgresql.database"],
            user=config["postgresql.user"],
            password=config["postgresql.password"],
            host=config["postgresql.host"],
            port=config["postgresql.port"],
        )
        cursor = dbh.cursor()
        cursor.execute(
            """CREATE TABLE IF NOT EXISTS 
                            connections	(
                            connection SERIAL PRIMARY KEY,
                            method TEXT, 
                            url TEXT, 
                            path TEXT, 
                            query_string TEXT,
                            headers TEXT,
                            source_ip TEXT,
                            source_port INTEGER,  
                            dest_host TEXT,
                            dest_port INTEGER, 
                            is_shellshock TEXT,
                            command TEXT,
                            command_data TEXT,
                            timestamp INTEGER
        );"""
        )
        dbh.commit()
    else:
        LOGGER.info("postgresql is disabled")
    return dbh
Example #56
def check_ip():
    Prev_IP = None
    with open('IP_Logger.csv', 'r+') as csv_file:
        # check if prev ip exists
        lines = csv_file.readlines()
        for i in range(-1, -len(lines) - 1, -1):
            if "Local IP" in lines[i]:
                Prev_IP = [x.strip() for x in lines[i].split(',')][-1]
                break
            else:
                continue

    #print "OldIP: {}".format(Prev_IP)

    Send_IP = Find_IP(user, password, to, args.pi_name)
    Send_IP.find_ip()
    New_IP = Send_IP.ip_addr
    #print "NewIP: {}".format(New_IP)
    if New_IP != Prev_IP:
        LOGGER.info('Email sent with new ip: {}'.format(New_IP))
        Send_IP.send_mail()
    else:
        LOGGER.info("IP Address hasn't changed")
Example #57
 def connect (self, addr, port) :
     self.soc.connect((addr,port))
     self.soc.setblocking(0)
     LOGGER.info("Connected to {} : {}".format(addr,port))
Example #58
 def fsync(self, path, isfsyncfile = 0):
     LOGGER.info("Fsync %s %s" % (path, isfsyncfile))
     return self.flush (path, 0)
Example #59
                        self.identSet.add(lsensor.physic_id)
                        LOGGER.info(lsensor.physic_id)
                    LOGGER.info("Traductor's set of captors updated")
            elif(anUpdate.newState==""):
                with self.lock:
                    if any(anUpdate.idToUpdate == things.physic_id for things in sensor.Sensor.objects):
                        self.identSet.add(anUpdate.idToUpdate)
                        LOGGER.info("{} added".format(anUpdate.idToUpdate))
            else:
                #send a trame from a captor with a newState
                LOGGER.error("Sensor to update : {} ||new state : {}".format(anUpdate.idToUpdate,anUpdate.newState))
                self.sendTrame(anUpdate.idToUpdate,anUpdate.newState)
            anUpdate.delete()
            LOGGER.warn(" {} update           GROS delete de : {} || {}".format(lazzyUpdate.objects.count(),anUpdate.idToUpdate,anUpdate.newState))
            return 
        LOGGER.debug("nothing to update")




if __name__ == '__main__':
    connect('test')
    try:
        myTrad=traductor()
        myTrad.launch('',1515)
    except socket.error:
        LOGGER.info("Déconnection du serveur")
        mytrad.soc.close()
    finally:
        pass