Example #1
    def train_parser(cls, options, data_train=None, data_dev=None, data_test=None):
        set_proc_name(options.title)
        ensure_dir(options.output)
        path = os.path.join(options.output, "{}_{}_train.log".format(options.title,
                                                                     int(time.time())))
        log_to_file(path)
        logger.name = options.title
        cls.options_hook(options)
        DataFormatClass = cls.get_data_formats()[options.data_format]

        if data_train is None:
            data_train = DataFormatClass.from_file(options.conll_train)

        if data_dev is None:
            data_dev = {i: DataFormatClass.from_file(i, False) for i in options.conll_dev}

        try:
            os.makedirs(options.output)
        except OSError:
            pass

        parser = cls(options, data_train)
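        # use a dedicated, fixed-seed RNG so the per-epoch shuffle of data_train is reproducible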
        random_obj = random.Random(1)
        for epoch in range(options.epochs):
            logger.info('Starting epoch %d', epoch)
            random_obj.shuffle(data_train)
            options.is_train = True
            parser.train(data_train)

            # save model and delete old model
            for i in range(0, epoch - options.max_save):
                path = os.path.join(options.output, os.path.basename(options.model)) + str(i + 1)
                if os.path.exists(path):
                    os.remove(path)
            path = os.path.join(options.output, os.path.basename(options.model)) + str(epoch + 1)
            parser.save(path)

            def predict(sentences, gold_file, output_file):
                options.is_train = False
                with open(output_file, "w") as f_output:
                    if hasattr(DataFormatClass, "file_header"):
                        f_output.write(DataFormatClass.file_header + "\n")
                    for i in parser.predict(sentences):
                        f_output.write(i.to_string())
                # script_path = os.path.join(os.path.dirname(__file__), "main.py")
                # p = subprocess.Popen([sys.executable, script_path, "mst+empty", "predict", "--model", path,
                #                       "--test", gold_file,
                #                       "--output", output_file], stdout=sys.stdout)
                # p.wait()
                DataFormatClass.evaluate_with_external_program(gold_file, output_file)

            for file_name, file_content in data_dev.items():
                try:
                    prefix, suffix = os.path.basename(file_name).rsplit(".", 1)
                except ValueError:
                    prefix = os.path.basename(file_name)
                    suffix = ""

                dev_output = os.path.join(options.output, '{}_epoch_{}.{}'.format(prefix, epoch + 1, suffix))
                predict(file_content, file_name, dev_output)
Example #2
    def train_parser(cls, options, data_train=None, data_dev=None, data_test=None):
        if sys.platform.startswith("linux"):
            set_proc_name(options.title)
        ensure_dir(options.output)
        path = os.path.join(options.output, "{}_{}_train.log".format(options.title,
                                                                     int(time.time())))
        log_to_file(path)
        logger.name = options.title
        cls.options_hook(options)
        DataFormatClass = cls.get_data_formats()[options.data_format]

        if data_train is None:
            data_train = DataFormatClass.from_file(options.conll_train)

        if data_dev is None:
            data_dev = {i: DataFormatClass.from_file(i, False) for i in options.conll_dev}

        if data_test is None and options.conll_test is not None:
            data_test = DataFormatClass.from_file(options.conll_test, False)
        else:
            data_test = None

        try:
            os.makedirs(options.output)
        except OSError:
            pass

        return cls.repeat_train_and_validate(data_train, data_dev, data_test, options)
Example #3
    def compile(self):
        super().compile()
        try:
            for file in self.get_file_list():
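                # temporarily strip the OpenMP header; it is injected back once Cetus has rewritten the file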
                Cetus.replace_line_in_code(file["file_full_path"], GlobalsConfig.OMP_HEADER, '')
                cwd_path = os.path.dirname(file["file_full_path"])
                self.copy_headers(cwd_path)
                logger.info(f'{Cetus.__name__}: start parallelizing {file["file_name"]}')
                command = [f'cetus {" ".join(self.get_compilation_flags())} {file["file_name"]}']
                stdout, stderr, ret_code = run_subprocess(command, cwd_path)
                log_file_path = f'{os.path.splitext(file["file_full_path"])[0]}{CetusConfig.LOG_FILE_SUFFIX}'
                logger.log_to_file(f'{stdout}\n{stderr}', log_file_path)
                logger.debug(f'{Cetus.__name__}: {stdout}')
                logger.debug_error(f'{Cetus.__name__}: {stderr}')
                logger.info(f'{Cetus.__name__}: finished parallelizing {file["file_name"]}')
                # Replace file from cetus output folder into original file folder
                if os.path.isdir(os.path.join(cwd_path, CetusConfig.OUTPUT_DIR_NAME)):
                    src_file = os.path.join(cwd_path, CetusConfig.OUTPUT_DIR_NAME, file["file_name"])
                    dst_file = file["file_full_path"]
                    shutil.copy(src_file, dst_file)
                    shutil.rmtree(os.path.join(cwd_path, CetusConfig.OUTPUT_DIR_NAME))

                Cetus.inject_line_in_code(file["file_full_path"], GlobalsConfig.OMP_HEADER)
            return True
        except subprocess.CalledProcessError as ex:
            log_file_path = f'{os.path.splitext(file["file_full_path"])[0]}{CetusConfig.LOG_FILE_SUFFIX}'
            logger.log_to_file(f'{ex.output}\n{ex.stderr}', log_file_path)
            raise CombinationFailure(f'cetus return with {ex.returncode} code: {str(ex)} : {ex.output} : {ex.stderr}')
        except Exception as ex:
            raise CompilationError(str(ex) + " files in directory " + self.get_input_file_directory() +
                                   " failed to be parallel!")
Example #4
 def __run_p4a_process(self):
     self.files_to_compile += [
         file_dict['file_full_path'] for file_dict in self.get_file_list()
     ]
     command = 'PATH=/bin:$PATH p4a -vv ' + ' '.join(self.files_to_compile)
     if self.extra_files:
         command += f' {" ".join(self.extra_files)}'
     command += ' ' + ' '.join(map(str, super().get_compilation_flags()))
     if self.include_dirs_list:
         command += ' -I ' + ' -I '.join(
             map(
                 lambda x: os.path.join(self.get_input_file_directory(),
                                        str(x)), self.include_dirs_list))
     try:
         logger.info(f'{Par4all.__name__}: start parallelizing')
         stdout, stderr, ret_code = run_subprocess([
             command,
         ], self.get_input_file_directory())
         log_file_path = os.path.join(self.get_input_file_directory(),
                                      Par4allConfig.LOG_FILE_NAME)
         logger.log_to_file(f'{stdout}\n{stderr}', log_file_path)
         logger.debug(f'{Par4all.__name__}: {stdout}')
         logger.debug_error(f'{Par4all.__name__}: {stderr}')
         logger.info(f'{Par4all.__name__}: finished parallelizing')
     except subprocess.CalledProcessError as e:
         log_file_path = os.path.join(self.get_input_file_directory(),
                                      Par4allConfig.LOG_FILE_NAME)
         logger.log_to_file(f'{e.output}\n{e.stderr}', log_file_path)
         raise CombinationFailure(
             f'par4all return with {e.returncode} code: {str(e)} : {e.output} : {e.stderr}'
         )
     except Exception as e:
         raise CompilationError(
             f"{e}\nfiles in directory {self.get_input_file_directory()} failed to be parallel!"
         )
Example #5
 def initialize_blind(self, src):
     src_text = self.check_source(src)
     self.setup_gpio()
     GPIO.output(self.go_up_pin, 1)
     GPIO.output(self.go_down_pin, 1)
     logger.log_to_file(
         f"{src_text} Blind '{self.device_name} initialized.",
         logs_directory)
Example #6
 def stop_blind(self, src):
     src_text = self.check_source(src)
     self.setup_gpio()
     GPIO.output(self.go_up_pin, 1)
     GPIO.output(self.go_down_pin, 1)
     logger.log_to_file(f"{src_text} Blind '{self.device_name} stopped.",
                        logs_directory)
     print("stopped")
Example #7
 def turn_off_light(self):
     print(
         f"Dummy opening, {self.room_name}, {self.device_type}, {self.device_name}, {self.off_pin}"
     )
     logger.log_to_file(
         f"Light '{self.device_name}' in '{self.room_name}' turning off...",
         logs_directory)
     logger.log_to_file(
         f"Light '{self.device_name}' in '{self.room_name}' turned off.",
         logs_directory)
Example #8
 def initialize_light(self):
     for pin in self.on_pin, self.off_pin:
         logger.log_to_file(
             f"Light '{self.device_name} in '{self.room_name}' initializing...",
             logs_directory)
         # GPIO.setup(xxxx, xxx)
         # GPIO.output(pin, 1)
         print("Initialize light")
         logger.log_to_file(
             f"Light '{self.device_name} in '{self.room_name}' initialized",
             logs_directory)
Example #9
    def train_parser(cls, options, data_train=None, data_dev=None, data_test=None):
        set_proc_name(options.title)
        ensure_dir(options.output)
        path = os.path.join(options.output, "{}_{}_train.log".format(options.title,
                                                                     int(time.time())))
        log_to_file(path)
        logger.name = options.title

        logger.info('Options:\n%s', pformat(options.__dict__))
        if data_train is None:
            data_train = cls.DataType.from_file(options.conll_train)

        if data_dev is None:
            data_dev = {i: cls.DataType.from_file(i, False) for i in options.conll_dev}

        try:
            os.makedirs(options.output)
        except OSError:
            pass

        parser = cls(options, data_train)
        random_obj = random.Random(1)

        def do_predict(epoch):
            for file_name, dev_sentences in data_dev.items():
                try:
                    prefix, suffix = os.path.basename(file_name).rsplit(".", 1)
                except ValueError:
                    prefix = file_name
                    suffix = ""

                dev_output = os.path.join(options.output, '{}_epoch_{}.{}'.format(prefix, epoch, suffix))
                cls.predict_and_output(parser, options, dev_sentences, dev_output)

        if options.epochs == 0:
            print("Predict directly.")
            do_predict(0)

        for epoch in range(options.epochs):
            logger.info('Starting epoch %d', epoch)
            random_obj.shuffle(data_train)
            parser.train(data_train)

            # save model and delete old model
            for i in range(0, epoch - options.max_save):
                path = os.path.join(options.output, os.path.basename(options.model)) + str(i + 1)
                if os.path.exists(path):
                    os.remove(path)
            path = os.path.join(options.output, os.path.basename(options.model)) + str(epoch + 1)
            parser.save(path)
            do_predict(epoch)
Example #10
 def compile(self):
     super().compile()
     # Parallelizing
     try:
         for file in self.get_file_list():
             self.run_autopar(file["file_name"], file["file_full_path"], self.get_compilation_flags())
         return True
     except subprocess.CalledProcessError as e:
         log_file_path = f'{os.path.splitext(file["file_full_path"])[0]}{AutoParConfig.LOG_FILE_SUFFIX}'
         logger.log_to_file(f'{e.output}\n{e.stderr}', log_file_path)
         raise CombinationFailure(f'autopar return with {e.returncode} code: {str(e)} : {e.output} : {e.stderr}')
     except Exception as e:
         raise CompilationError(str(e) + " files in directory " + self.get_input_file_directory() +
                                " failed to be parallel!")
Example #11
    def log(self):
        interruptions = len(self.buffering_time) - 1

        # Checking if player is on initial buffering state
        if interruptions > 0 or not self.is_buffering:
            initial_wait = self.buffering_time[0]
        else:
            initial_wait = -1

        # Removing invalid samples
        buffering_time = self.buffering_time[1:]
        if self.is_buffering:
            buffering_time = buffering_time[:-1]

        # Calculating statistics
        mean_time = stats.avg(buffering_time)
        std_time = stats.std(buffering_time)

        # Logging
        logger.log("--*--Buffer statistics--*--")
        logger.log("Time to start playback (s): %d" % initial_wait)
        logger.log("Number of interruptions: %d" % interruptions)
        logger.log("Interruption time (s) - mean: %f" % mean_time)
        logger.log("Interruption time (s) - standard deviation: %f" % std_time)
        logger.log("Interruptions (s): %r" % buffering_time)

        logger.log_to_file("playback_start_time, %d\r\n" % initial_wait)
        logger.log_to_file("interruptions, %d\r\n" % interruptions)
        logger.log_to_file("interruption_time_mean, %f\r\n" % mean_time)
        logger.log_to_file("interruption_time_stdev, %f\r\n" % std_time)
Example #12
 def run_autopar(self, file_name: str, file_full_path: str, options: list):
     logger.info(f'{Autopar.__name__}: started parallelizing {file_name}')
     command = 'autoPar'
     if self.include_dirs_list:
         command += ' -I' + ' -I'.join(map(lambda x: os.path.join(self.get_input_file_directory(), str(x)),
                                           self.include_dirs_list))
     command += f' {" ".join(options)} -c {file_name}'
     stdout, stderr, ret_code = run_subprocess([command], os.path.dirname(file_full_path))
     log_file_path = f'{os.path.splitext(file_full_path)[0]}{AutoParConfig.LOG_FILE_SUFFIX}'
     logger.log_to_file(f'{stdout}\n{stderr}', log_file_path)
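     # autoPar writes its result to a prefixed copy of the source; replace the original with it when present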
     dir_path, file_name = os.path.split(file_full_path)
     parallel_file_full_path = os.path.join(dir_path, f'{AutoParConfig.OUTPUT_FILE_NAME_PREFIX}{file_name}')
     if os.path.exists(parallel_file_full_path):
         os.remove(file_full_path)
         os.rename(parallel_file_full_path, file_full_path)
     logger.debug(f'{Autopar.__name__}: {stdout}')
     logger.debug_error(f'{Autopar.__name__}: {stderr}')
     logger.info(f'{Autopar.__name__}: finished parallelizing {file_name}')
Example #13
def handle_ssh_errors(status, output, cmd):
    if "install -y nginx" in cmd:
        print(
            colored('Failure installing nginx \n',
                    'red',
                    attrs=['reverse', 'blink']))
        log_to_file(status, output)
    elif "scp" in cmd:
        print(
            colored(
                'Failure copying script using SCP \n Please Check the Command and Restart Program to Try Process Again\n',
                'red',
                attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "chmod 700" in cmd:
        print(
            colored(
                'Failure changing permissions on webserver script \n Please Check the Command and Restart Program To Try Process Again\n',
                'red',
                attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "-y python34" in cmd:
        print(
            colored('Failure installing python\n',
                    'red',
                    attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "python3 start_webserver.py" in cmd:
        print(
            colored(
                "Failure starting webserver \n Please Check the Command and Restart Program To Try Process Again \n",
                'red',
                attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    else:
        print('Error')
Example #14
def main():
    # parse config
    config_file = sys.argv[1]
    config = Config(config_file)

    # setup logger
    setup_logging(config.working_dir)

    # encoding func
    encoding_func = ENCODING_METHOD_MAP[config.encoding_method]
    encoding_func2 = ENCODING_METHOD_MAP[config.encoding_method2]
    log_to_file('Encoding method2', config.encoding_method2)

    data_provider = []
    for p in range(config.base_model_count):
        temp_provider = DataProvider(
             encoding_func,
             encoding_func2,
             config.data_file,
             config.test_file,
             config.batch_size,
             max_len_hla=config.max_len_hla,
             max_len_pep=config.max_len_pep,
             model_count=config.model_count
        )
        data_provider.append(temp_provider)
 
    log_to_file('max_len_hla', data_provider[0].max_len_hla)
    log_to_file('max_len_pep', data_provider[0].max_len_pep)
    
    test(config, data_provider[0])
Example #15
 def close_blind(self, src):
     src_text = self.check_source(src)
     self.setup_gpio()
     # If blind is opening (0 on input) - stop opening
     if not GPIO.input(self.go_up_pin):
         GPIO.output(self.go_up_pin, 1)
         logger.log_to_file(
             f"{src_text} Blind '{self.device_name}' in '{self.room_name}' opening aborted!",
             logs_directory)
     logger.log_to_file(
         f"{src_text} Blind '{self.device_name}' in '{self.room_name}' closing...",
         logs_directory)
     GPIO.output(self.go_down_pin, 0)
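     # poll for up to 30 s (150 x 0.2 s); the for-else "else" branch runs only if the loop finishes without a break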
     for i in range(150):
         sleep(0.2)
         # Break when during closing OPEN Button or STOP Button clicked
         if not GPIO.input(self.go_up_pin) or (GPIO.input(
                 self.go_up_pin) and GPIO.input(self.go_down_pin)):
             GPIO.output(self.go_down_pin, 1)
             break
     else:
         GPIO.output(self.go_down_pin, 1)
         logger.log_to_file(
             f"{src_text} Blind '{self.device_name}' in '{self.room_name}' closed.",
             logs_directory)
         print(f"Blind '{self.device_name}' in '{self.room_name}' closed.")
Example #16
def handle_ssh_errors(status, output, cmd):
    if "install -y nginx" in cmd:
        print(colored('Failure installing nginx \n', 'red', attrs=['reverse', 'blink']))
        log_to_file(status, output)
    elif "scp" in cmd:
        print(colored('Failure copying script using SCP \n Please Check the Command and Restart Program to Try Process Again\n', 'red', attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "chmod 700" in cmd:
        print(colored('Failure changing permissions on webserver script \n Please Check the Command and Restart Program To Try Process Again\n', 'red', attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "-y python34" in cmd:
        print(colored('Failure installing python\n', 'red', attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    elif "python3 start_webserver.py" in cmd:
        print(colored("Failure starting webserver \n Please Check the Command and Restart Program To Try Process Again \n", 'red', attrs=['reverse', 'blink']))
        log_to_file(status, output)
        sys.exit(1)
    else:
        print('Error')
Example #17
def test(config, data_provider):

    if not config.do_test:
        log_to_file('Skip testing', 'Not enabled testing')
        return

    device = config.device

    temp_list = []
    for p in range(config.base_model_count):
        for k in range(config.model_count):
            # load and prepare model
            state_dict = torch.load(config.model_save_path(p * config.model_count + k))
            model = Model(config)
            model.load_state_dict(state_dict)
            model.to(device)
            model.eval()
            temp_dict = {}
            data_provider.new_epoch()
            for _ in range(data_provider.test_steps()):
                data = data_provider.batch_test()
                with torch.no_grad():
                    pred_ic50, uid_list = batch_test(model, device, data, config)
                    for i, uid in enumerate(uid_list):
                        temp_dict[uid] = pred_ic50[i].item()
            temp_list.append(temp_dict)

    # average score of the ensemble model
    result_dict = temp_list[0]
    if config.model_count > 1:
        for k in range(1, config.model_count):
            for j in result_dict.keys():
                result_dict[j] += temp_list[k][j]

    if config.base_model_count > 1:
        for p in range(1, config.base_model_count):
            for k in range(config.model_count):
                for j in result_dict.keys():
                    result_dict[j] += temp_list[p * config.model_count + k][j]

    for j in result_dict.keys():
        result_dict[j] = result_dict[j] / (config.model_count * config.base_model_count)

    # print(result_dict)
    result_file = weeekly_result_writer(result_dict, config)
    log_to_file('Testing result file', result_file)

    metric_file = write_metrics_file(result_file, config)
    log_to_file('Testing metric result file', metric_file)
Example #18
def index(request):

    room_names = Room.objects.all()

    context = {'room_names': room_names}

    if request.POST:
        received_data = request.POST
        main_action = received_data.get('main_action')
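        # main_action is None when the POST carries no 'main_action' field; the bare except below returns an empty response in that case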
        try:
            if "all_blinds_open" in main_action:
                logger.log_to_file(f"[WebServer] All blinds opening...",
                                   logger.get_logs_directory())
                open_all_blinds(source="web")
            elif "all_blinds_close" in main_action:
                logger.log_to_file(f"[WebServer] All blinds closing...",
                                   logger.get_logs_directory())
                close_all_blinds(source="web")
            elif "all_blinds_stop" in main_action:
                logger.log_to_file(f"[WebServer] All blinds stopping...",
                                   logger.get_logs_directory())
                stop_all_blinds(source="web")
            elif "emergency_stop" in main_action:
                logger.log_to_file(f"[WebServer] Emergency stop performing...",
                                   logger.get_logs_directory())
                stop_all_blinds(source="web")
                for light in Light.objects.all():
                    light.initialize_light()
            elif "restart_system" in main_action:
                logger.log_to_file(f"[WebServer] Restart system performing...",
                                   logger.get_logs_directory())
                manual_buttons.system_restart()
            elif "shutdown_system" in main_action:
                logger.log_to_file(
                    f"[WebServer] Shutdown system performing...",
                    logger.get_logs_directory())
                manual_buttons.system_shutdown()
            else:
                pass
        except:
            return HttpResponse("")

    return render(request, 'devices/index.html', context)
Example #19
def train(config, data_provider, p):
    # skip training if test mode
    if not config.do_train:
        log_to_file('Skip train', 'Not enabled training')
        return
    device = config.device
    log_to_file('Device', device)
    # log pytorch version
    log_to_file('PyTorch version', torch.__version__)
    # prepare model
    log_to_file('based on base_model #', p)

    for i in range(config.model_count):
        log_to_file('begin training model #', i)
        model = Model(config)
        weight_initial(model, config)
        model.to(device)
        # state_dict = torch.load(config.model_base_path(p))
        # model = Model(config)
        # model.load_state_dict(state_dict)
        # model.to(device)
        # log param count
        log_to_file('Trainable params count', count_parameters(model))
        print(model.parameters())
        # exit()
        # OPTIMIZER
        optimizer = optim.SGD(model.parameters(), lr=config.start_lr)
        log_to_file("Optimizer", "SGD")
        # call backs
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                                   threshold=config.loss_delta,
                                                   patience=4,
                                                   cooldown=4,
                                                   verbose=True,
                                                   min_lr=config.min_lr,
                                                   factor=0.2)
        model_check_callback = ModelCheckPointCallBack(
            model,
            config.model_save_path(p * config.model_count + i),
            period=1,
            delta=config.loss_delta,
        )
        early_stop_callback = EarlyStopCallBack(patience=25,
                                                delta=config.loss_delta)

        # some vars
        epoch_loss = 0
        validation_loss = 0
        data_provider.new_epoch()
        # reset data provider
        # output the validation dataset

        # val_data_path = os.path.join(config.working_dir, 'val_data_{}.csv'.format(p*config.model_count+i))

        # val_df=pd.DataFrame(data_provider.validation_samples[i],columns=["hla_a","peptide", "ic50"])
        # val_df.to_csv(val_data_path, sep=',',header=True,index=True)

        steps = data_provider.train_steps()
        log_to_file('Start training1', datetime.datetime.now())

        for epoch in range(config.epochs):
            epoch_start_time = datetime.datetime.now()
            # train batches
            print(steps)
            model.train(True)
            for _ in range(steps):
                data = data_provider.batch_train(i)
                print("***")
                loss = batch_train(model, device, data, config)
                print("loss:", loss)
                # exit()
                loss.backward()
                # clip grads
                nn.utils.clip_grad_value_(model.parameters(), config.grad_clip)
                # update params
                optimizer.step()
                # record loss
                epoch_loss += loss.item()
                # reset grad
                optimizer.zero_grad()
                # time compute
            time_delta = datetime.datetime.now() - epoch_start_time
            # validation on epoch end

            model.eval()
            # print(data_provider.val_steps())
            # print(data_provider.batch_index_val)
            # validation_call
            val_sample = []
            val_pred = []
            for _ in range(data_provider.val_steps()):
                data = data_provider.batch_val(i)
                t_loss, t_pred, t_samples = batch_validation(
                    model, device, data, config)
                val_sample.append(t_samples)
                val_pred.append(t_pred)
                validation_loss += t_loss
            # log
            log_to_file(
                "Training process",
                "[base_model{0:1d}]-[model{1:1d}]-[Epoch {2:04d}] - time: {3:4d} s, train_loss: {4:0.5f}, val_loss: {5:0.5f}"
                .format(p, i, epoch, time_delta.seconds, epoch_loss / steps,
                        validation_loss / data_provider.val_steps()))
            # call back
            model_check_callback.check(
                epoch, validation_loss / data_provider.val_steps())
            if early_stop_callback.check(
                    epoch, validation_loss / data_provider.val_steps()):
                break
            # LR schedule
            scheduler.step(loss.item())
            # reset loss
            epoch_loss = 0
            validation_loss = 0
            # reset data provider
            data_provider.new_epoch()
            # save last epoch model
            torch.save(
                model.state_dict(),
                os.path.join(
                    config.working_dir,
                    'last_epoch_model_{}.pytorch'.format(p *
                                                         config.model_count +
                                                         i)))
        #validation_call
        val_path = os.path.join(
            config.working_dir,
            'val_result_{}.csv'.format(p * config.model_count + i))

        val_temp_list = []
        for ii in range(len(val_sample)):
            for jj in range(len(val_sample[ii])):
                temp = {
                    "hla_a": val_sample[ii][jj][0],
                    "peptide": val_sample[ii][jj][1],
                    "ic50": val_sample[ii][jj][2],
                    "pred_ic50": val_pred[ii][jj]
                }
                val_temp_list.append(temp)
        val_df = pd.DataFrame(val_temp_list)
        val_df["up_ic50"] = 50000**val_df["ic50"]
        val_df["up_pred_ic50"] = 50000**val_df["pred_ic50"]
        val_df["binding"] = val_df["up_ic50"].apply(lambda x: 1
                                                    if x < 500 else 0)
        val_df["pred_binding"] = val_df["up_pred_ic50"].apply(
            lambda x: 1 if x < 500 else 0)
Example #20
def main():
    # parse config
    config_file = sys.argv[1]
    config = Config(config_file)

    # setup logger
    setup_logging(config.working_dir)

    # encoding func
    encoding_func = ENCODING_METHOD_MAP[config.encoding_method]
    encoding_func2 = ENCODING_METHOD_MAP[config.encoding_method2]
    log_to_file('Encoding method2', config.encoding_method2)

    data_provider = []
    for p in range(config.base_model_count):
        temp_provider = DataProvider(encoding_func,
                                     encoding_func2,
                                     config.data_file,
                                     config.test_file,
                                     config.batch_size,
                                     max_len_hla=config.max_len_hla,
                                     max_len_pep=config.max_len_pep,
                                     model_count=config.model_count)
        data_provider.append(temp_provider)

    log_to_file('Training samples', len(data_provider[0].train_samples[0]))
    log_to_file('Val samples', len(data_provider[0].validation_samples[0]))
    log_to_file('Training steps', data_provider[0].train_steps())
    log_to_file('Val steps', data_provider[0].val_steps())
    log_to_file('Batch size', data_provider[0].batch_size)
    log_to_file('max_len_hla', data_provider[0].max_len_hla)
    log_to_file('max_len_pep', data_provider[0].max_len_pep)

    for p in range(config.base_model_count):
        train(config, data_provider[p], p)
Example #21
    def log(self):
        mean_download_rate = stats.avg(self.download_rates)
        std_download_rate = stats.std(self.download_rates)

        mean_upload_rate = stats.avg(self.upload_rates)
        std_upload_rate = stats.std(self.upload_rates)

        logger.log("--*--Torrent statistics--*--")
        logger.log("Download rate (KiB/s) - mean: %f" % mean_download_rate)
        logger.log("Download rate (KiB/s) - standard deviation: %f" % std_download_rate)
        logger.log("Upload rate (KiB/s) - mean: %f" % mean_upload_rate)
        logger.log("Upload rate (KiB/s) - standard deviation: %f" % std_upload_rate)

        logger.log_to_file("download_rate_mean, %f\r\n" % mean_download_rate)
        logger.log_to_file("download_rate_stdev, %f\r\n" % std_download_rate)
        logger.log_to_file("upload_rate_mean, %f\r\n" % mean_upload_rate)
        logger.log_to_file("upload_rate_stdev, %f\r\n" % std_upload_rate)

        if self.download_finished:
            logger.log("Download time (s): %d" % self.download_time)
            logger.log_to_file("download_time, %d\r\n" % self.download_time)
        else:
            logger.log_to_file("download_time, %d\r\n" % -1)

        self.buffer_manager.log()
Example #22
    today_sunrise = sun_cycles.Sunrise()
    today_sunset = sun_cycles.Sunset()

    while True:
        # Checks between 3.00 and 3.03 AM
        if datetime.now().hour == 3 and datetime.now().minute < 3:
            logger.remove_old_logs(last_days_logging,
                                   logger.get_logs_directory())
            # Instantiate sunrise and sunset every day
            today_sunrise = sun_cycles.Sunrise()
            today_sunset = sun_cycles.Sunset()

        current_time = pytz.timezone("Europe/Warsaw").localize(datetime.now())
        # Sunrise
        if today_sunrise.period_on < current_time < today_sunrise.period_off:
            logger.log_to_file(f"[AutoSun] All blinds opening...",
                               logger.get_logs_directory())
            today_sunrise.action()

        if today_sunrise.period_on < current_time < today_sunrise.period_off:
            today_sunrise.set_inactive()
        # Sunset
        if today_sunset.period_on < current_time < today_sunset.period_off:
            logger.log_to_file(f"[AutoSun] All blinds closing...",
                               logger.get_logs_directory())
            today_sunset.action()

        if today_sunset.period_on < current_time < today_sunset.period_off:
            today_sunset.set_inactive()

        # Manual buttons
        if ManualButtons["Rolety_UP"].is_pressed():