Example #1
    def load_logger(self, verbose):
        if not verbose:
            lg = Logger(level='ERROR')
        else:
            lg = Logger()

        return lg.get()
    def process(self):
        filepath = os.getcwd() + '/watch/' + self.filename
        if os.path.isfile(filepath):

            print("Muncipality import started")

            conn = pyodbc.connect(
                r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=' +
                filepath)
            cursorAD = conn.cursor()
            cursorAD.execute('SELECT * FROM GEMEENTEN')

            # Create cursor queries
            header = "SET IDENTITY_INSERT muncipality ON " \
                     "INSERT INTO muncipality (muncipality_code, muncipality_name) VALUES "

            cursor = self.db.conn.cursor()
            Logger().errors['Muncipalities'] = dict()
            for row in cursorAD.fetchall():
                query = header + "("
                query += "'" + str(row[0]) + "', "
                query += "'" + row[1] + "'"
                query += ")"

                try:
                    cursor.execute(query)
                except Exception as e:
                    code = e.args[0]
                    if code in Logger().errors['Muncipalities']:
                        Logger().errors['Muncipalities'][code] += 1
                    else:
                        Logger().errors['Muncipalities'][code] = 1

            cursor.commit()
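A note on the pattern above: the row values are interpolated directly into the SQL text. pyodbc supports `?` parameter markers, which sidesteps quoting and escaping entirely; a minimal sketch of the same insert in that style (same table and cursors as above):

            cursor = self.db.conn.cursor()
            cursor.execute("SET IDENTITY_INSERT muncipality ON")
            for row in cursorAD.fetchall():
                # pyodbc binds each ? marker to a parameter, so the
                # values need no quoting or string concatenation.
                cursor.execute(
                    "INSERT INTO muncipality "
                    "(muncipality_code, muncipality_name) VALUES (?, ?)",
                    str(row[0]), row[1])
            cursor.commit()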
Example #3
    def run(self, parser=2, downloader=2):
        self._logger.info('Image download job started')
        start = time.time()

        # Manager for multiprocessing shared state
        with Manager() as manager:
            # List of worker processes
            processes = []

            # Shared-memory variables
            content_list = manager.list()
            image_list = manager.list()
            count = manager.Value('i', 0)
            lock = manager.Lock()
            feeder_running = manager.Value('i', 1)
            parser_running = manager.Value('i', 1)

            parser_logger = Logger('cybot_parser.log')
            downloader_logger = Logger('cybot_downloader.log')
            main_cookies = self._driver.get_cookies()
            cookie = []

            for c in main_cookies:
                cookie.append({'name': c['name'], 'value': c['value']})

            # Create and start the parser processes
            for idx in range(parser):
                parser_instance = Parser(self._chromedriver, cookie,
                                         parser_logger, self._delay)
                parser_process = Process(
                    target=parser_instance.parse,
                    args=(content_list, image_list, feeder_running, parser_running)
                )
                parser_process.name = 'Parser::' + str(idx)
                parser_process.start()
                processes.append(parser_process)
                self._logger.info('Parser', str(idx), 'process started')

            # Create and start the downloader processes
            for idx in range(downloader):
                downloader_instance = Downloader(downloader_logger)
                downloader_process = Process(
                    target=downloader_instance.downloader,
                    args=(image_list, count, lock, parser_running))
                downloader_process.name = 'Downloader::' + str(idx)
                downloader_process.start()
                processes.append(downloader_process)
                self._logger.info('Downloader', str(idx), 'process started')

            # Start the feeder process
            self._logger.info('Starting feeder')
            self.feeder(content_list, feeder_running)

            # Wait until the parser and downloader processes have finished
            for p in processes:
                p.join()

            self._logger.info('Job took: {} seconds'.format(
                round(time.time() - start, 2)))
            self._logger.info('Total images: {}'.format(count.value))
    def execute(self, query):
        try:
            self.cursor.execute(query)
            self.conn.commit()
        except pyodbc.DataError as e:
            Logger().error(e)
        except pyodbc.OperationalError as e:
            Logger().error(e)
        except pyodbc.DatabaseError as e:
            Logger().error(e)
        except pyodbc.Error as e:
            error_log = repr(e).split(';')
            for error in error_log:
                Logger().error(error)
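pyodbc follows the DB-API 2.0 exception hierarchy: DataError, OperationalError, and DatabaseError are all subclasses of pyodbc.Error, which is why the handlers above are ordered most-specific first. When the handling is identical they can collapse into one; a minimal sketch under the same attributes:

    def execute(self, query):
        try:
            self.cursor.execute(query)
            self.conn.commit()
        except pyodbc.Error as e:
            # The DB-API base class also catches DataError,
            # OperationalError, and DatabaseError.
            for error in repr(e).split(';'):
                Logger().error(error)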
Example #5
    def __init__(self, logger: AbstractLogger = Logger()):
        if WebSocketClient.__instance:
            raise Exception('Use get_instance() instead!')
        self.__ws = WebSocketApp(
            'wss://jqbx.fm/socket.io/?EIO=3&transport=websocket')
        self.__logger = logger
        WebSocketClient.__instance = self
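The raise above points at a companion get_instance() accessor that isn't shown; a hypothetical sketch of how such a classmethod is usually paired with this constructor (the lazy construction is an assumption, not from the source):

    @classmethod
    def get_instance(cls) -> 'WebSocketClient':
        # Hypothetical accessor: build the singleton on first use and
        # hand back the same instance on every later call.
        if not cls.__instance:
            cls()  # __init__ assigns WebSocketClient.__instance = self
        return cls.__instance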
Example #6
def main():
    """
    Wraps the main logic of the program into one function.
    """
    args = handle_args()

    # determine log level from the verbosity flag
    logger = Logger(verbosity=4 if args.verbose else 0)

    # set up the TMP Client
    logger.log("TMPCLI", "INFO", "Creating TMP Client")
    client = TmpClient(
        args.server,
        args.port,
        logger,
        args.business_type,
        args.version,
    )
    logger.log("TMPCLI", "INFO", "Sending payload")
    if args.use_tdp:
        client.use_tdp = True
        client.send(args.opcode, args.payload, args.packet_type)
    else:
        client.send(args.opcode, args.payload)
Example #7
    def test_did_processed(self):
        logger = Logger()
        category = 'ACTL2F'
        true_file_name = '763a8z0163c5aau04_20181220_123945_-350694_-693875.jpg'
        processed_file_name = '763a8z0163c7aag03_20181220_124557_-516891_231788.jpg'
        false_file_name = '111a8z0163c5aau04_20181220_123945_-350694_-693875.jpg'

        output_normal_path, output_defect_path = create_image_output_dir(self.output_path, category)

        true_result = did_processed(
            true_file_name,
            category,
            output_normal_path, output_defect_path,
            logger=logger
        )

        self.assertIs(true_result, True, "Result True")

        false_result = did_processed(
            false_file_name,
            category,
            output_normal_path, output_defect_path,
            logger=logger
        )

        self.assertIs(false_result, False, "Image False")

        true_result = did_processed(
            processed_file_name,
            category,
            output_normal_path, output_defect_path,
            logger=logger
        )

        self.assertIs(true_result, True, "Processed True")
Example #8
def main():
    """Gets config file, loads specified components, and runs experiment."""
    # Get overall config dict.
    parser = ArgumentParser()
    parser.add_argument("config")
    config_file = parser.parse_args().config
    with open(config_file) as f:
        config = yaml.safe_load(f)

    # Initialize logger.
    run_name = get_run_name(config)
    print(f"Running {run_name}")
    logger = Logger(config["logger"], run_name, config_file)
    logger.p(f"Using config file {config_file}.")

    logger.p("Loading model...")
    model = load_model(config["model"])

    logger.p("Loading dataset...")
    dataset = load_dataset(config["dataset"])

    logger.p("Loading metrics...")
    metric_computers = load_metrics_computers(config["metrics"])

    logger.p("Running experiment...")
    run_experiment(model, dataset, logger, metric_computers,
                   config["experiment"])

    logger.p("Done! Closing logger...")
    logger.close()

    print("Have a nice day!")
Example #9
def main():
    global all_data

    clear()
    start_mess = '''
    |---------------------|
    |       by rady       |
    |                     |
    |    UserDetective    |
    |                     |
    |       by rady       |
    |---------------------|

    '''
    print(Fore.GREEN + start_mess)

    name = input(Fore.YELLOW + '[*] Enter Username: ')
    print('\n[*] Wait...\n')
    for site in sites:
        res = check_accounts(site, name)
        if res == 200:
            result = '[+] ' + sites[site] + ': ' + site + name
            all_data += result + '\n'
            print(Fore.GREEN + result)
        else:
            result = '[-] ' + sites[site]
            all_data += result + '\n'
            print(Fore.RED + result)
    
    log = Logger(all_data)
    print(Fore.WHITE)

    input()
Example #10
    def migrate_product_data(self):
        db = Database()
        start_time = round(time.time() * 1000)

        print('Starting products staging table data migration to target')
        db.execute("SET NOCOUNT ON exec ImportCategoryData")
        db.execute("SET NOCOUNT ON exec ImportIngredientData")
        db.execute("SET NOCOUNT ON exec ImportSauceData")
        db.execute("SET NOCOUNT ON exec ImportCrustData")
        db.execute("SET NOCOUNT ON EXEC ImportPizzaData")
        db.execute("SET NOCOUNT ON exec ImportOtherProductData")
        print('products data migration to target done\n')

        engine = src.dbEbgine().get_db_engine()
        try:
            error_dataframe = pd.read_sql(
                "SELECT * FROM product_import_error_log", engine)
            if len(error_dataframe) > 0:
                print('Product migration complete with ' +
                      str(len(error_dataframe)) + ' errors in ' +
                      str(round(time.time() * 1000) - start_time) +
                      ' ms. See error logs for details.\n')
                error_string = "Product migration errors found: \n" \
                               + self.__indent(error_dataframe.to_string(),
                                               30)

                Logger().error(error_string)
            else:
                print('Product migration complete with no errors in ' +
                      str(round(time.time() * 1000) - start_time) +
                      ' ms.\n')
        except Exception:
            print("No import errors found")

        return round(time.time() * 1000) - start_time
Example #11
    def __init__(self, user, data):

        self.logger = Logger(user)

        super().__init__('sessions/' + user,
                         API_ID,
                         API_HASH,
                         update_workers=4)

        self.phone = PHONE

        self.chats = {}

        self.state = State.IDLE

        self.level = 0

        self.exhaust = time.time()

        # Adventure quest location
        self.adventure = None

        # All locations
        self.locations = create_locations()

        self.logger.log('Session {} opened'.format(user))
    def clean_prices(self, dataframe, column_name, filename):
        currency_conversion_error_df = dataframe.rename(columns={column_name: 'Violating Character'})
        price_conversion_df = dataframe

        # Find alphanumerical characters filtered out by regex (except euro character) and place into error dataframe
        currency_conversion_error_df['Violating Character'] = currency_conversion_error_df[
            'Violating Character'].str.findall(r"[a-zA-Z]")

        # Filter out empty string arrays returned by regex findall
        currency_conversion_error_df = currency_conversion_error_df[
            currency_conversion_error_df['Violating Character'].str.len() > 0]
        # print(len(currency_conversion_error_df))
        if len(currency_conversion_error_df) > 0:
            error_string = "Price conversion error found in: " + filename + "\n" \
                           + self.__indent(currency_conversion_error_df.to_string()
                                           + "\nViolating character is removed and resulting price will be migrated to target.",
                                           30)

            Logger().error(error_string)

        # Remove alphanumeric characters from prices.
        price_conversion_df['Extra Price'] = price_conversion_df['Extra Price'].str.replace(r"[a-zA-Z€ ]", '', regex=True)
        price_conversion_df["Extra Price"] = pd.to_numeric(price_conversion_df["Extra Price"])
        # uniform capitalization of ingredient names.
        price_conversion_df['Ingredient'] = price_conversion_df['Ingredient'].str.title()
def main():
    # Loop through 'watch' directory
    files = os.listdir("watch")
    if files:

        # Start processing
        run_time = PizzaIngredientsStagingTableBuilder(
            'pizza_ingredienten.xlsx', 'Extra Ingredienten.csv').process()
        run_time += ExtraIngredientsStagingTableBuilder(
            'Extra Ingredienten.csv').process()
        run_time += PizzaCrustsStagingTableBuilder(
            'pizzabodems.xlsx').process()
        run_time += OtherProductsStagingTableBuilder(
            'Overige Producten.xlsx').process()

        print('All staging tables built in ' + str(run_time) + ' ms. \n')

        run_time += ProductsMigration().migrate_product_data()

        print('Product data migrated in ' + str(run_time) + ' milliseconds.\n')

        Muncipality("Postcode tabel.mdb").process()
        ZipCode("Postcode tabel.mdb").process()
        Shop("Winkels Mario.txt").process()
        OrderData("MarioOrderData01_10000.csv")
        OrderData("MarioOrderData02_10000.csv")
        OrderData("MarioOrderData03_10000.csv")
        OrderData("MarioOrderData04_10000.csv")
        # # Move files to 'complete' directory
        # for file in files:
        #     move_file(file)

        Logger().commit_errors()
Example #14
    def send_mail(self, to, subject, body, attachments=None):
        msg = MIMEMultipart()
        msg['From'] = self.user
        msg['To'] = ', '.join(to)
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = subject
        msg.attach(MIMEText(body, 'plain'))

        if attachments is not None:
            for attachment in attachments:
                part = MIMEBase('application', 'octet-stream')
                part.set_payload(attachment[1].read())
                encoders.encode_base64(part)
                part.add_header('Content-Disposition', 'attachment',
                                filename=attachment[0])
                msg.attach(part)

        content = msg.as_string()
        try:
            self.server.sendmail(self.user, to, content)
        except Exception as error:
            logger = Logger()
            logger.send(
                f'An Error occurred while sending the mail! ERROR: {error}')
            sys.exit(6)
Example #15
    def __init__(self, platform_name: str = "", full=True):
        """
        Set up the platform and, if needed, create the Suprenam's data folder.

        Args:
            platform_name: the current OS, only provided during testing.
        Raises:
            UnsupportedOSError: if the current OS is not supported.
        """
        self.platform = platform_name or get_platform_long_string().partition("-")[0]

        if self.platform == "macOS":
            self.workspace = Path.home() / "Library" / "Application Support" / "Suprenam"
        elif self.platform == "Linux":
            self.workspace = Path.home() / ".suprenam"
        elif self.platform == "Windows":
            self.workspace = Path.home() / "AppData" / "Roaming" / "Suprenam"
        elif self.platform == "mockOS":
            self.workspace = Path("test") / "workspace"
        else:
            raise UnsupportedOSError(f"Unsupported operating system: {self.platform}.")

        self.workspace.mkdir(
            parents=True,  # any missing parents of this path are created as needed
            exist_ok=True,  # if the directory already exists, do not raise an exception
        )

        config_path = self.workspace / "config.json"
        if not config_path.is_file():
            config_path.write_text(json.dumps(self.DEFAULT_CONFIG, indent=4))
        self.config = json.loads(config_path.read_text())

        self.logger = Logger(self)

        self.print_ = Printer(self)
Example #16
    def __init__(self,
                 experiment_name: str,
                 train_data_path: Path,
                 test_data_path: Path,
                 greyscale: bool,
                 epochs: int,
                 batch_size: int,
                 image_shape: tuple,
                 cache: bool,
                 histogram_freq: int,
                 update_freq: str,
                 profile_batch: int,
                 learning_rate: float,
                 loss: str,
                 optimizer: str,
                 padding: str,
                 hidden_activation: str,
                 output_activation: str,
                 units: int,
                 hyperparams=None):
        self.greyscale = greyscale
        self.train_data_path = train_data_path
        self.test_data_path = test_data_path
        self.cache = cache
        self.cache_files_dir = data_processed_dir / experiment_name
        if not cache:
            shutil.rmtree(self.cache_files_dir, ignore_errors=True)
            self.cache_files_dir.mkdir(parents=True)
            proccesed_train_dataset_file_name = f"{train_data_path.parent.stem}{train_data_path.stem}.tfcache"
            self.proccesed_train_dataset_file_path = self.cache_files_dir / proccesed_train_dataset_file_name
            proccesed_test_dataset_file_name = f"{test_data_path.parent.stem}{test_data_path.stem}.tfcache"
            self.proccessed_test_dataset_file_path = self.cache_files_dir / proccesed_test_dataset_file_name
        else:
            self.proccesed_train_dataset_file_path = None
            self.proccessed_test_dataset_file_path = None

        self.epochs = epochs
        self.number_of_channels = utils.get_number_of_channels(greyscale)
        self.image_shape = image_shape
        self.input_shape = utils.get_input_shape(self.image_shape, self.number_of_channels)
        self.batch_size = batch_size
        self.hyperparams = hyperparams

        # Logger
        self.logger = Logger(logs_dir=project_path / 'logs' / experiment_name,
                             models_dir=project_path / 'models' / experiment_name,
                             cache_files_dir=self.cache_files_dir,
                             hyperparams=hyperparams,
                             histogram_freq=histogram_freq,
                             profile_batch=profile_batch,
                             update_freq=update_freq)

        self.optimizer = optimizer
        self.learning_rate = learning_rate
        self.loss = loss
        self.padding = padding
        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        self.units = units
Example #17
def main():
    log = Logger(dir_log + "test.log")
    print(log._timestamp())
    log._log('Writes a plain descriptive sentence to the log.')

    log.info('Writes an informational message to the log.')
    log.error('Writes the error message to the log when an error occurs.')
    log.warning('Writes a message to the log when a warning is raised.')
Example #18
    def __import_adress(self, lines):

        # Create cursor queries
        header = "INSERT INTO adress (zipcode_id, house_nr, house_nr_addition) VALUES "

        cursor = self.db.get_connection().cursor()
        Logger().errors['Restaurants'] = dict()

        for key, value in lines.items():

            # Get address info
            addressInfo = self.__get_adres_info(value[:-1])

            index = 0
            if (int(addressInfo['house_nr'].split('-')[0]) % 2) == 0:
                index = 1

            zipQuery = "SELECT id FROM zipcode WHERE series_index = " + str(index) + " AND zipcode = '" + addressInfo[
                'zipcode'] + "' AND " + addressInfo['house_nr'].split('-')[
                           0] + " BETWEEN breakpoint_from and breakpoint_to"

            query = header + "((" + zipQuery + "), '" + addressInfo['house_nr'] + "', '" + addressInfo[
                'house_ad'] + "')"

            try:
                cursor.execute(query)

                # Get last ID
                cursor.execute("SELECT @@IDENTITY AS ID;")
                address_id = cursor.fetchone()[0]

                # Import restaurant
                query = "INSERT INTO restaurant (adress_id, name, phone_nr) VALUES"
                query += "(" + str(address_id) + ", '" + addressInfo['name'] + "', " + addressInfo['phone_nr'] + ")"

                cursor.execute(query)

            except Exception as e:
                code = e.args[0]
                if code in Logger().errors['Restaurants']:
                    Logger().errors['Restaurants'][code] += 1
                else:
                    Logger().errors['Restaurants'][code] = 1

        cursor.commit()
Example #19
    def __init__(self, segments, settings, segment_generator, effects):
        self.segments = segments
        self.settings = settings
        self.breath_pause = segment_generator.generate_breath_pause()
        self.effects = effects
        self.segment_selector = SegmentSelector(segments)
        self.logger = Logger()
        self.result = Result()
        self.init()
    def __init__(self):
        self.api_key = ""
        self.api_secret_key = ""
        self.get_api_key()
        self.api_endpoint = "https://api.liquid.com"
        self.balance = None
        self.connect_timeout = 3.0  # connect timeout against the server
        self.read_timeout = 10.0  # read timeout from the server
        self.logger = Logger()
Example #21
def setup_train(config):
    model = get_model(config)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.start_lr)
    lr_scheduler = CosineAnnealingLR(optimizer,
                                     T_max=config.optim_steps,
                                     eta_min=config.end_lr)
    logger = Logger(config)
    validator = Validator(config, logger)

    return model, optimizer, lr_scheduler, logger, validator
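A minimal sketch of how the tuple returned by setup_train might drive an optimization loop (the batch iterable and criterion are assumptions; get_model, Logger, and Validator come from the surrounding project):

model, optimizer, lr_scheduler, logger, validator = setup_train(config)

for inputs, targets in train_batches:  # train_batches is hypothetical
    optimizer.zero_grad()
    loss = criterion(model(inputs), targets)  # criterion is an assumption
    loss.backward()
    optimizer.step()
    lr_scheduler.step()  # advance the cosine-annealing schedule each step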
Example #22
def main(config):

    config_proto = tf.ConfigProto()
    config_proto.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config_proto)
    dataloader = Dataloader(sess, config, prefetch_buffer_size=10)
    model = Model(sess, dataloader, config)
    model.build_model()
    logger = Logger(sess, config)
    trainer = Trainer(sess, model, dataloader, logger, config)
    trainer.train()
    def __init__(self):
        config = ConfigParser().get_config()
        try:
            self.conn = pyodbc.connect(
                'DRIVER={SQL Server};SERVER=' + config["Server"] + ';DATABASE=' + config[
                    "Database"] + ';UID=' + config["Username"] + ';PWD=' + config["Password"])

            self.cursor = self.conn.cursor()
        except pyodbc.Error as ex:
            Logger().error("Something went wrong connecting to database: " + ex.args[1])
            exit()
Example #24
    def main(config):
        """Core Loop"""

        # Create Logger Instance
        Logger(config['logging_level'])
        logger = logging.getLogger(__name__ + ".Controller")
        logger.disabled = config['disable_logging']
        logger.info("Initialized Logger")

        # Create PerformanceAnalysis instance
        perf = Performance()
        logger.info("Initialized Performance Analysis")

        # Create InfluxPusher instance
        if config['enable_influxdb'] is True:
            influx_killmail = InfluxPusherKillmail(
                config['influx']['ip'], config['influx']['port'],
                config['influx']['database_name'])

        influx_perf = InfluxPusherPerformance(
            config['influx']['ip'], config['influx']['port'],
            config['influx']['database_name'])

        # Create MongoPusher instance
        if config['enable_mongodb'] is True:
            mongo = MongoPusher(config['mongo']['ip'], config['mongo']['port'],
                                config['mongo']['database_name'])

        # Create RedisQ instance
        redis = RedisQ()

        while True:
            unprocessed_killmail = RedisQ.makeCall(redis)

            perf.setCycleStart()

            if config['enable_mongodb'] is True:
                mongo.writeToDatabase(unprocessed_killmail)
                logger.debug("Wrote to Mongo")

            if config['enable_influxdb'] is True:
                killmail = Killmail(unprocessed_killmail)
                logger.debug("Processed Killmail")

                influx_killmail.writeToDatabase(killmail)
                logger.debug("wrote killmail to influx")

            perf.setCycleEnd()
            perf.calcCycleStats()

            if config['enable_performance_logging'] is True:
                influx_perf.writeToDatabase(perf)
                logger.debug("wrote performance to influx")
Example #25
def split_images(files, output_root_path, category):
    logger = Logger()
    show_viewer = not FLAGS.using_only_summary
    pre_select_marks = load_pre_select_marks(output_root_path, category)

    output_normal_path, output_defect_path = create_image_output_dir(
        output_root_path, category)
    grid_viewer = GridViewer()
    json_datas = []

    for image_path in files:
        _, file_name = os.path.split(image_path)
        if did_processed(file_name, category, output_normal_path,
                         output_defect_path, logger):
            continue

        image = cv2.imread(image_path)
        selected_marks = find_pre_select_marks_by_file_name(
            pre_select_marks, file_name)

        if show_viewer:
            return_flag, selected_marks = grid_viewer.select_mark(
                image, rgb_image=False)

            if return_flag.skip:
                logger.append_processed_image(file_name)
                continue
            elif return_flag.stop:
                break
        elif len(selected_marks) == 0:
            continue

        normal_images, defect_images = split_image_by_grid_marks(
            image, selected_marks)
        print("normal {} / defect {}".format(len(normal_images),
                                             len(defect_images)))

        save_split_images(normal_images,
                          output_normal_path,
                          category,
                          base_name=file_name)
        save_split_images(defect_images,
                          output_defect_path,
                          category,
                          base_name=file_name)

        logger.append_processed_image(file_name)
        summary = {"file": file_name, "data": grid_viewer.get_summary()}
        json_datas.append(summary)

        save_summary(output_root_path, category, summary)

    return json_datas
Example #26
    def __init__(self, **kwargs):
        self.parameters = kwargs
        self.logger = Logger(**kwargs)
        self.data_loader = DataLoader(**kwargs)
        self.model = GCN(input_dim=self.data_loader.get_input_feat_size(),
                         hidden_dim=kwargs['hidden_dim'],
                         num_classes=self.data_loader.get_num_classes(),
                         dropout_prob=kwargs['dropout_prob'],
                         bias=kwargs['bias'])
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=kwargs['lr'])
        self.cross_entropy = torch.nn.NLLLoss()
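Worth noting: torch.nn.NLLLoss expects log-probabilities, so the pairing above is only correct if the GCN's forward pass ends in log_softmax, which is not visible here. A minimal sketch of a matching train step for this class (the method name and feature/label arguments are hypothetical):

    def train_step(self, features, labels):
        self.optimizer.zero_grad()
        log_probs = self.model(features)  # assumed to end in log_softmax
        loss = self.cross_entropy(log_probs, labels)  # NLLLoss consumes log-probs
        loss.backward()
        self.optimizer.step()
        return loss.item()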
Example #27
    def __init__(self, model=None, train_loader=None, val_loader=None,
                 test_loader=None,
                 if_checkpoint_save=True,
                 penalty=None,
                 print_result_epoch=False,
                 print_metric_name=None,
                 metrics=None,
                 score_function=None,
                 create_save_file_name=None,
                 target_reshape=None, **kwargs):
        self.test_loader = test_loader
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.model = model
        self.train_current_batch_data = {}
        self.valid_current_batch_data = {}
        self.if_checkpoint_save = if_checkpoint_save
        self.print_result_epoch = print_result_epoch
        self.penalty = penalty
        self.target_reshape = target_reshape
        self.metrics = metrics
        self.score_function = score_function
        self.create_save_file_name = create_save_file_name
        self.print_metric_name = print_metric_name


        self.epochs = self.model.get_epochs()
        self._optimizer = self.model.get_optimizer()
        self._criterion = self.model.get_criterion()
        self._lr_adjust = self.model.get_lr_scheduler()
        self._tensorboard_path = self.model.get_tensorboard_path()
        self._save_path = self.model.get_logger_path()
        self._logger = Logger(self._tensorboard_path)

        if not os.path.exists(os.path.join(self._save_path, 'train_save')):
            os.makedirs(os.path.join(self._save_path, 'train_save'))
        if not os.path.exists(os.path.join(self._save_path, 'test_save')):
            os.makedirs(os.path.join(self._save_path, 'test_save'))
        print(self._save_path, self.create_save_file_name())

        self.train_checkpoint_save = os.path.join(self._save_path, 'train_save',
                                                  self.create_save_file_name() + '_ckpt.path.tar')
        self.train_model_save = os.path.join(self._save_path, 'train_save',
                                             self.create_save_file_name() + '_best.path.tar')

        self.test_checkpoint_save = os.path.join(self._save_path, 'test_save',
                                                 self.create_save_file_name() + '_ckpt.path.tar')
        self.test_model_save = os.path.join(self._save_path, 'test_save',
                                             self.create_save_file_name() + '_best.path.tar')

        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise TypeError('should be a torch.optim.Optimizer type, instead of {}'.format(type(self._optimizer)))

        global best_val_acc, best_test_acc
Example #28
    def __init__(self):
        self.logger = Logger('logs/')

        # model
        self.model = None
        self.optimizer = None
        self.lr_scheduler = None

        # data
        self.train_loader = None
        self.val_loader = None
        self.train_data = None
        self.val_data = None
Example #29
    def __init__(self, config):
        self.logger = Logger("Server")

        if not config:
            self.logger.warning("Unable to find server configuration")
            sys.exit()

        self.type = config["type"]
        self.port = config["port"]
        self.max_penguins = config["max_penguins"] if self.type is "world" else 150

        self.penguins = []

        self.data_handler = DataHandler(self)
Example #30
    def __init__(self, chromedriver, delay=3):
        self._logger = Logger('cybot.log')

        self._logger.info('Loading Chrome driver..')
        driver = webdriver.Chrome(chromedriver)
        driver.implicitly_wait(5)
        self._logger.info('Chrome driver loaded')

        self._chromedriver = chromedriver
        self._base_url = 'https://cy.cyworld.com'
        self._user_id = ''
        self._delay = delay
        self._driver = driver
        self._wait = WebDriverWait(driver, 5)
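The WebDriverWait stored in self._wait is normally paired with Selenium's expected-conditions helpers; a minimal sketch of such a call (the CSS selector is a placeholder, not from the source):

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

# Inside a method of the class above: block for up to 5 seconds until
# the element is present, then return it.
element = self._wait.until(
    EC.presence_of_element_located((By.CSS_SELECTOR, '#placeholder')))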