Example no. 1
    def read(self):
        """
        Read stacks and check their compatibility
        """
        # Initialize 'stack' and 'reference stack' objects
        self.stack = Stack(self.paths.stack)
        self.ref_stack = Stack(self.paths.ref_stack)

        # Check stack's metrics
        if self.stack.info['Stack size'] != self.ref_stack.info['Stack size']:
            raise RuntimeError(
                'Stacks can\'t be processed; stack sizes don\'t match')
        elif self.stack.info['Resolution'] != self.ref_stack.info['Resolution']:
            raise RuntimeError(
                'Stacks can\'t be processed; stack resolutions don\'t match')
        elif self.stack.info['Pixel spacing'] != self.ref_stack.info[
                'Pixel spacing']:
            raise RuntimeError(
                'Stacks can\'t be processed; stack pixel spacings don\'t match'
            )
        elif self.stack.info['Slice thickness'] != self.ref_stack.info[
                'Slice thickness']:
            raise RuntimeError(
                'Stacks can\'t be processed; stack slice thicknesses don\'t match'
            )
        else:
            self.stack.load()
            self.ref_stack.load()
            self.save = Saver(self.paths, self.stack.info)
Example no. 2
def main():
    saver = Saver()
    train_data, val_data, test_data, raw_doc_list = load_data()

    print(train_data.graph.shape)
    if COMET_EXPERIMENT:
        with COMET_EXPERIMENT.train():
            saved_model, model = train(train_data, val_data, saver)
    else:
        saved_model, model = train(train_data, val_data, saver)
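    # run test-set inference without tracking gradients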
    with torch.no_grad():
        test_loss_model, preds_model = model(
            train_data.get_pyg_graph(device=FLAGS.device), test_data)
    eval_res = eval(preds_model, test_data, True)
    y_true = eval_res.pop('y_true')
    y_pred = eval_res.pop('y_pred')
    print("Test...")
    pprint(eval_res)
    if COMET_EXPERIMENT:
        from comet_ml.utils import ConfusionMatrix

        def index_to_example(index):
            test_docs_ids = test_data.node_ids
            return raw_doc_list[test_docs_ids[index]]

        confusion_matrix = ConfusionMatrix(
            index_to_example_function=index_to_example,
            labels=list(test_data.label_dict.keys()))
        confusion_matrix.compute_matrix(y_true, y_pred)

        with COMET_EXPERIMENT.test():
            COMET_EXPERIMENT.log_metrics(eval_res)
            COMET_EXPERIMENT.log_confusion_matrix(
                matrix=confusion_matrix,
                labels=list(test_data.label_dict.keys()))
Example no. 3
 def _start_loging(self):
     """
     Starts recording when the "start recording" button is pressed.
     """
     self.saver = Saver(datetime.now().strftime("%d.%m.%Y.%H.%M.%S.jsonlines"))
     self.saving_status = True
     self.statemachine = StateMachine()
Example no. 4
 def __init__(self, url, subscribe_message):
     websocket.enableTrace(True)
     self.ws = None
     while not self.ws:
         # create_connection raises on failure rather than returning None,
         # so catch and retry until the socket comes up
         try:
             self.ws = websocket.create_connection(url)
         except (websocket.WebSocketException, OSError):
             self.ws = None
     self.send(subscribe_message)
     self.saver = Saver()
Example no. 5
    def __init__(self, args, netdata, log):

        self.t = 0
        self.args = args
        self.sumo_cmd = set_sumo(args.gui, args.roadnet, args.max_steps, args.port)
        self.log = log
        print(self.sumo_cmd)

        self.save_path = set_save_path(args.roadnet, args.tsc)
        self.saver = Saver(self.save_path)

        self.max_steps = args.max_steps
        self.green_t = args.green_duration
        self.yellow_t = args.yellow_duration
        self.red_t = args.red_duration
        self.mode = args.mode
        self.scale = args.scale
        self.port = args.port
        self.netdata = netdata
        self.tl_ids = self.netdata['inter'].keys()
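        # launch SUMO as a subprocess, then attach a TraCI connection to it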
        self.sumo_process = subprocess.Popen(self.sumo_cmd)
        self.conn = traci.connect(self.port)

        self.netdata = self.update_netdata()
        self.vehiclegen = VehicleGen(self.netdata, self.conn, self.mode, self.scale, self.max_steps)


        self.Controllers = {str(id):None for id in self.tl_ids}
        self.create_controllers(self.args.tsc)
        if self.args.tsc == 'dqn' and self.args.mode == 'test':
            self.load_model()
        self.v_start_times = {}
        self.v_travel_times = {}
        self.episode_performance = []
        self.set_item_path()
Example no. 6
    def onStart(self, input_list):
        length_interleaving = input_list[0]
        save_and_load = Saver("data/")

        pbm_model = PBM([0.2] * length_interleaving, self.data)
        try:
            print("Attempting loading gamma's from pickle")
            gammas_pbm = save_and_load.load_python_obj("gammas_pbm")
            pbm_model.parameters = gammas_pbm
        except Exception:
            print(
                "Did not find gammas saved in pickle; will retrain and save"
            )
            pbm_model.train()
            save_and_load.save_python_obj(pbm_model.parameters, "gammas_pbm")

        random_model = Random_Click_Model([0.1] * length_interleaving,
                                          self.data)
        try:
            print("Attempting loading rho's from pickle")
            rho_random = save_and_load.load_python_obj("rho_random")
            random_model.parameters = rho_random
        except Exception:
            print("Did not find rhos saved in pickle; will retrain and save")
            random_model.train()
            save_and_load.save_python_obj(random_model.parameters,
                                          "rho_random")

        return (pbm_model, random_model)
Example no. 7
 def __init__(self):
     self.proxies = ProxyHandler()
     self.tasker = Tasker()
     self.saver = Saver()
     self.log_handler = LogHandler()
     self.tieba_task = tieba_task
     self.loop_num = LOOP_NUM
Example no. 8
def prepare_logger():
    model_dir, train_dir, log_dir = make_log_dirs(args.dump_dir, args.run_id)
    writer = SummaryWriter(log_dir)

    saver = Saver(model_dir=model_dir, max_to_keep=5)
    config_obj = dict(dataset_config=train_dataset.config,
                      model_config=dict(model_name='softmax+ave'),
                      train_config=vars(args))
    if not os.path.exists(os.path.join(log_dir, 'config.json')):
        with open(os.path.join(log_dir, 'config.json'), 'w') as f:
            json.dump(config_obj, f, indent=2)

    # Unbuffer output
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w')

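    # mirror stdout and stderr into a timestamped log file via tee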
    tee = subprocess.Popen([
        "tee",
        os.path.join(
            train_dir,
            datetime.datetime.now().strftime('output_%H_%M_%d_%m_%Y.log'))
    ],
                           stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
    print(' '.join(sys.argv))

    return writer, saver
Example no. 9
def run():
    start_time = time.time()

    # fetch the task list
    task = Task()
    url_list = task.get_task()

    # crawl the data
    for i in range(LOOP_NUM):
        per_step_urls = task.get_urls(url_list)
        print('Starting next loop', end='\n' * 3)
        spider = Spider()
        spider.async_req(per_step_urls)

        # save the data
        saver = Saver()
        saver.run()

    logger.info('Loops completed')
    print('finish')

    total_time = time.time() - start_time
    print(total_time)
    logger.info('End time: {}'.format(datetime.now()))
    logger.info('Total time elapsed: {}'.format(total_time))
Example no. 10
    def _init_architecture(self):
        tf.compat.v1.disable_eager_execution()
        self.input_pl = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, self.input_dim))

        weights = []
        biases = []
        i_dim = self.input_dim

        # encoder layers
        for layer_size in self.layer_sizes:
            w = weight_variable([i_dim, layer_size])
            b = bias_variable([layer_size])
            i_dim = layer_size
            weights.append(w)
            biases.append(b)

        # decoder layers (tied weights case)
        decoder_weights = []
        decoder_biases = []
        for w in weights[::-1]:
            decoder_weights.append(tf.transpose(w))
            decoder_biases.append(bias_variable([int(w.shape[0])]))

        self.weights = weights
        self.biases = biases
        self.decoder_weights = decoder_weights
        self.decoder_biases = decoder_biases

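        # decoder weights are transposes of the encoder weights (tied), so
        # only encoder weights/biases and decoder biases need to be saved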
        self.saver = Saver(self.weights + self.biases + self.decoder_biases)
Example no. 11
def main():
    t = time()
    check_flags()
    print(get_model_info_as_str())
    data_train = SiameseModelData(FLAGS.dataset_train)
    dist_sim_calculator = DistSimCalculator(
        FLAGS.dataset_train, FLAGS.ds_metric, FLAGS.ds_algo)
    model = create_model(FLAGS.model, data_train.input_dim(),
                         data_train, dist_sim_calculator)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver = Saver(sess)
    sess.run(tf.global_variables_initializer())
    if FLAGS.dataset_val_test == FLAGS.dataset_train:
        data_val_test = data_train
    else:
        # Generalizability test: val test on unseen train and test graphs.
        data_val_test = SiameseModelData(FLAGS.dataset_val_test)
    eval = Eval(data_val_test, dist_sim_calculator)
    try:
        train_costs, train_times, val_results_dict = \
            train_val_loop(data_train, data_val_test, eval, model, saver, sess)
        best_iter, test_results = \
            test(data_val_test, eval, model, saver, sess, val_results_dict)
        overall_time = convert_long_time_to_str(time() - t)
        print(overall_time)
        saver.save_overall_time(overall_time)
    except Exception:
        traceback.print_exc()
    else:
        return train_costs, train_times, val_results_dict, best_iter, test_results
Example no. 12
def load_checkpoint(context):
    args = context['args']
    model = context['model']
    if args.resume == '':
        return
    t_saver = Saver(model_dir=args.resume)
    print('==> loading checkpoint from {}'.format(args.resume))
    if args.evaluate:
        checkpoint = t_saver.load_best()
    else:
        checkpoint = t_saver.load_latest()
    if checkpoint:
        best_metric = checkpoint['best_metric']
        context['best_metric'] = best_metric
        model.load_state_dict(checkpoint['model_state_dict'])
        if 'step' in checkpoint:
            step = checkpoint['step']
        else:
            step = 0
        if args.step != -1:
            step = args.step
        print("==> loaded checkpoint {} (step {}, best_metric {})".format(args.resume,
                                                                          step, best_metric))

        context['step'] = step
    else:
        raise RuntimeError("==> no checkpoint at: {}".format(args.resume))
Example no. 13
def prepare_logger(context):
    args = context['args']
    model = context['model']
    train_dataset = context['train_dataset']

    model_dir, train_dir, log_dir = make_log_dirs(args.dump_dir, args.run_id)
    writer = SummaryWriter(log_dir)

    saver = Saver(model_dir=model_dir, max_to_keep=5)
    config_obj = dict(dataset_config=[tt.config if hasattr(tt, 'config') else None for tt in train_dataset],
                      model_config=model.config if hasattr(model, 'config') else None, train_config=vars(args))
    if not os.path.exists(os.path.join(log_dir, 'config.json')):
        with open(os.path.join(log_dir, 'config.json'), 'w') as f:
            json.dump(config_obj, f, indent=2)

    # Unbuffer output
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w')

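    # mirror stdout and stderr into a timestamped log file via tee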
    tee = subprocess.Popen(["tee", os.path.join(train_dir, datetime.datetime.now().strftime('output_%H_%M_%d_%m_%Y.log'))]
                           , stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
    print(' '.join(sys.argv))

    context['writer'] = writer
    context['saver'] = saver
    return writer, saver
Example no. 14
def start_simulation(initial_data, verbose=True):
    enable_contact_tracing = bool(initial_data['contact_tracing'])  # Enable contact tracing
    total_agents = int(initial_data['total_agents'])  # Number of Agents
    initially_infected_agents = int(initial_data['infected_agents'])  # Number of initially infected agents
    initially_healthy_agents = int(total_agents - initially_infected_agents)  # Number of initially healthy agents
    office_capacity = int(initial_data['office_capacity'])  # Capacity of agents per office
    house_capacity = int(initial_data['home_capacity'])  # Capacity of agents per house
    mortality_rate = float(initial_data['mortality_rate'])  # Mortality rate
    total_days_sick = int(initial_data['sick_days'])  # Number of days sick
    days_until_symptoms = int(initial_data['free_symptoms_days'])  # Number of days until symptoms
    total_days_simulated = int(initial_data['total_days'])  # Number of days of simulation
    risk_infection_home = float(initial_data['risk_home'])  # Risk of infection at home
    risk_infection_work = float(initial_data['risk_work'])  # Risk of infection at work
    verbose = bool(verbose)  # If we want printing during simulator run

    simulation_id = initial_data['simulation_id'].replace("-", "_")

    locations, agent_array = initialize(total_agents, initially_infected_agents, initially_healthy_agents,
                                        office_capacity, house_capacity, mortality_rate, total_days_sick,
                                        days_until_symptoms, total_days_simulated, risk_infection_home,
                                        risk_infection_work)

    saver = Saver(verbose, simulation_id)
    simulator = Simulator(enable_contact_tracing, locations, agent_array, saver, simulation_id)

    saver.initialize_db(locations, agent_array)

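    # advance the simulation one day at a time until the horizon is reached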
    while simulator.current_day <= total_days_simulated:
        simulator.step()

    return 200, saver.overview
Example no. 15
    def __init__(self):
        self.size = (550, int(550 * 1.12))

        self.display_window = py.display.set_mode((self.size))
        self.background = py.Surface(self.display_window.get_size()).convert()
        self.background.fill((0, 100, 0))
        self.myfont = py.font.SysFont('monospace', 16)

        self.space = Space(self.size)
        self.snake = Snake(self.space)
        self.saver = Saver()

        self.options = self.get_inside_options()
        self.snake.start(self.get_random_cell())

        self.food_cell = self.get_random_cell()
        self.food_cell.occupied = True

        self.fps_start = 15  # set frame rate; may change later
        self.fps = self.fps_start
        self.clock = py.time.Clock()
        self.delay = False  # trying to keep the snake from running into itself

        self.mirage = []
        self._time = 0

        self.running = True
        self.paused = False
Example no. 16
    def __init__(self, saver=None):

        self.root = tk.Tk() 
        self.root.title('Photo Browser')
        self.config_data = {'bg':'#B8E0FE', 'width':500, 'height':450, 'font': ('Arial 12')}         # default configuration for widgets
        self.root.geometry('500x450+100+100')
        self.search_mode = False
        # temp dir for thumbnails
        self.thumbnail_dir = Path((Path().cwd() / 'Thumbnails'))
        # table to track number of copied photos every new folder
        self.TOTAL = {}
        # variables to track the widgets in the future
        self.msg = ''
        self.entries = []
        self.labels = []
        self.btn_names = []
        self.names = ['Trash']
        self.next_bt = []
        self.img_list = []
        self.dir_to_organaize = ''
        self.num_of_dir = 1
        self.i = 0
        # build the default here (rather than in the signature) so each
        # instance gets its own Saver instead of a shared default object
        self.saver = saver if saver is not None else Saver(DB_SCHEME)
        self.im_size = (200,200)            # default
        # set holding the paths of all selected photos
        self.chosen_pathes = set()
        # for testing mode
        self.testing = False
Example no. 17
def main():
    # parse options
    parser = TrainOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    os.makedirs(opts.dataroot, exist_ok=True)

    dataset = torchvision.datasets.CIFAR10(opts.dataroot, train=True, download=True, transform=transforms.Compose([
        transforms.Resize(opts.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = CDCGAN(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    ep0 += 1
    print('start the training at epoch %d'%(ep0))

    # saver for display and output
    saver = Saver(opts)

    # train
    print('\n--- train ---')
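    # hard upper bound on the total number of training iterations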
    max_it = 200000
    for ep in range(ep0, opts.n_ep):
        for it, (images, label) in enumerate(train_loader):
            if images.size(0) != opts.batch_size:
                continue
            # input data
            images = images.cuda(opts.gpu).detach()
            # update model
            model.update_D(images, label)
            model.update_G()

            # save to display file
            if not opts.no_display_img:
                saver.write_display(total_it, model)

            print('total_it: %d (ep %d, it %d), lr %08f' % (total_it, ep, it, model.gen_opt.param_groups[0]['lr']))
            total_it += 1
            if total_it >= max_it:
                saver.write_img(-1, model)
                saver.write_model(-1, max_it, model)
                break

        # save result image
        saver.write_img(ep, model)
        # Save network weights
        saver.write_model(ep, total_it, model)
    return
Example no. 18
    def onStart(self, input_list):

        saver = Saver("data/")

        try:

            return_dict = saver.load_python_obj("interleavings")
            return return_dict
        except Exception:

            probabilistic_interleavings_list = []
            team_draft_interleavings_list = []
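            # rank-position distribution: softmax over normal (sigma = 1.5)
            # densities evaluated at the top three positions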
            self.distribution = softmax(
                [norm.pdf(x, 0, 1.5) for x in range(3)])

            for number, category in enumerate(input_list.values()):
                print("\nStart interleaving category {}".format(number))

                local_probabilistic_interleavings_list = []
                local_team_draft_interleavings_list = []

                for pair_number, (ranking1, ranking2) in enumerate(category):

                    if pair_number % max(1, len(category) // 10) == 0:
                        print("\r{} out of {} done".format(
                            pair_number, len(category)),
                              end='')

                    try:
                        probabilistic_interleaving = ProbabilisticInterleaving(
                            ranking1, ranking2, self.distribution)
                        probabilistic_interleaving.cut_off_at(3)
                        local_probabilistic_interleavings_list.append(
                            probabilistic_interleaving)
                    except Exception:
                        pass

                    try:
                        draft_interleaving = TeamDraftInterleaving(
                            ranking1, ranking2)
                        draft_interleaving.cut_off_at(3)
                        local_team_draft_interleavings_list.append(
                            draft_interleaving)
                    except Exception:
                        pass

                probabilistic_interleavings_list.append(
                    local_probabilistic_interleavings_list)
                team_draft_interleavings_list.append(
                    local_team_draft_interleavings_list)

            print("\n\n")

            return_dict = {
                "probabilistic": probabilistic_interleavings_list,
                "team_draft": team_draft_interleavings_list
            }
            saver.save_python_obj(return_dict, "interleavings")
            return return_dict
Example no. 19
 def restore(self, sess):
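     # rebuild the checkpoint's variable list, skipping any names given in
     # skip_var_names, then restore it at the requested iteration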
     skip_var_names = self.skip_var_names.split(
         ',') if self.skip_var_names else []
     var_list = Saver.get_var_list_of_ckpt_dir(
         self.ckpt_dir, skip_var_names=skip_var_names)
     print('Restoring {} variables...'.format(len(var_list)))
     Saver(self.ckpt_dir,
           var_list=var_list).restore_at_itr(sess, restore_itr=self.itr)
Example no. 20
 def test_data_persistence(self):
     if os.path.exists("persistence.jsonlines"):
         os.remove("persistence.jsonlines")
     new_measurement = Saver("persistence.jsonlines") #luo uusi mittaussessio saveri
     new_measurement.save_measurement(69.69, 96, 13, datetime(1952, 12, 24, 16, 12, 59, 13))
     new_measurement.save_measurement(15, 50, 16, datetime(1992, 11, 17, 15, 00, 59, 12))  # two measurements are added
     # read right away, before stopping, to check whether the two measurements are there
     meas = new_measurement.read_measurements()
     self.assertEqual(len(meas), 2)
     self.assertEqual(meas[0]["temperature"], 69.69)
     self.assertEqual(meas[1]["moisture"], 50)
     new_measurement.stop_recording()  # stop
     new_measurement2 = Saver("persistence.jsonlines")  # create a new measurement session with the same file name as before
     meas2 = new_measurement2.read_measurements()
     self.assertEqual(len(meas2), 2)
     self.assertEqual(meas2[0]["temperature"], 69.69)
     self.assertEqual(meas2[1]["moisture"], 50)  # read the measurements again right away: are they the same as before?
Example no. 21
    def __init__(self):

        os.environ[
            'GOOGLE_APPLICATION_CREDENTIALS'] = "credentials/google-nlp-api.json"
        # Instantiates a client
        self.client = language.LanguageServiceClient()

        self.saver = Saver()
Example no. 22
 def test_moisture_read(self):
     if os.path.exists("dataread.jsonlines"):
         os.remove("dataread.jsonlines")
     new_measurement = Saver("dataread.jsonlines") #luo uusi mittaussessio saveri
     new_measurement.save_measurement(69.69, 96, 13, datetime(1952, 12, 24, 16, 12, 59, 13))
     new_measurement.save_measurement(15, 50, 16, datetime(1992, 11, 17, 15, 00, 59, 12))  # two measurements are added
     meas = new_measurement.read_moisture()
     self.assertEqual(meas[0], 96)
     self.assertEqual(meas[1], 50)
Example no. 23
def run(simulation_name, file_name):
    runner = ParallelRunner(simulation_name, 400, 1000, 4)
    output = runner.run()

    formatter = Formatter(output)
    for_out = formatter.format()

    saver = Saver(for_out, file_name)
    saver.save()
Example no. 24
    def __init__(self, args):
        self.args = args
        self.saver = Saver(args)
        self.writer = SummaryWriter(log_dir=self.saver.save_to_dir, flush_secs=3)
        self.logger = Logger(self.saver.save_to_dir, args).logger
        self.begin_epoch = 0

        if args.use_pretrained:
            # SpaCy embedding length
            self.args.embedding_len = 300
Example no. 25
 def scheduler_crawler(self):
     """
     Fetch proxies on a fixed schedule.
     :return: None
     """
     saver = Saver()
     while True:
         print('Starting proxy crawl...')
         saver.run()
         time.sleep(settings.crawler_interval)
Example no. 26
 def __init__(self):
     GPIO.setmode(GPIO.BCM)
     GPIO.setwarnings(False)
     self.beeper = Beeper(pins)
     self.errorBeeper = Beeper(pins)
     self.sensorReader = Sensor(pins["photoresistor"], pins["tandu"])
     self.readingLock = False
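     # route hardware button presses to the onButtonPress handler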
     evm = EventManager(pins["button"])
     evm.registerButtonListener(self.onButtonPress)
     self.running = True
     self.saver = Saver()
Example no. 27
def view_table() -> str:
    table = request.form['table']
    saver = Saver('vsearchlogDB')
    the_data = saver.get_table(table)
    table_headers = saver.get_table_header(table)
    print(table_headers)
    return render_template('view_table.html',
                           title=table,
                           table_headers=table_headers,
                           the_data=the_data)
Example no. 28
 def test_no_extra_rows_after_saving(self):
     if os.path.exists("kinkeri.jsonlines"):
         os.remove("kinkeri.jsonlines")
     self.saving = Saver("kinkeri.jsonlines")
     self.saving.save_measurement(69.69, 96, 13, datetime(1952, 12, 24, 16, 12, 59, 13))
     self.saving.save_measurement(15, 50, 16, datetime(1992, 11, 17, 15, 00, 59, 12))
     self.saving.stop_recording()
     self.saving.save_measurement(27, 65, 14, datetime(1994, 1, 27, 11, 00, 45, 59))
     with open('kinkeri.jsonlines') as testfile:
         rivit = testfile.read().split("\n")
         self.assertEqual(len(rivit), 3)
     open('kinkeri.jsonlines', 'w').close()
Example no. 29
    def __init__(self):
        # prepare training parameters
        if args.backbone == "vgg16":
            model = torchvision.models.vgg16_bn(True)
            # remove dropout
            model.classifier = nn.Sequential(
                *[model.classifier[i] for i in [0, 1, 3, 4, 6]])
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
        else:
            raise NotImplementedError()
        criterion_ce = nn.CrossEntropyLoss()
        if args.loss_type == "PC1_sign":
            criterion_corr = PearsonCorrelationLoss1("sign")
        elif args.loss_type == "PC2_sign":
            criterion_corr = PearsonCorrelationLoss2("sign")
        elif args.loss_type == "PC3_sign":
            criterion_corr = PearsonCorrelationLoss3("sign")
        else:
            raise NotImplementedError()
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)
        self.train_loader, self.validate_loader = make_data_loader(args)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr)
        self.criterion_ce = criterion_ce.to(self.device)
        self.criterion_corr = criterion_corr.to(self.device)
        self.best_pred = 0.0

        # config saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # tensorboard
        self.summary = TensorboardSummary(self.saver.experiment_dir, mean, std)
        self.writer = self.summary.create_summary()

        # resume training
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
Example no. 30
 def test_saving_and_writing_succesful(self):
     if os.path.exists("kinkeri.jsonlines"):
         os.remove("kinkeri.jsonlines")
     self.saving = Saver("kinkeri.jsonlines")
     self.saving.save_measurement(69.69, 96, 13, datetime(1952, 12, 24, 16, 12, 59, 13))
     self.saving.save_measurement(15, 50, 16, datetime(1992, 11, 17, 15, 00, 59, 12))
     self.saving.save_measurement(27, 65, 14, datetime(1994, 1, 27, 11, 00, 45, 59))
     self.saving.stop_recording()
     with open('kinkeri.jsonlines') as testfile:
         # the file is open; test its contents
         rivit = testfile.read().split("\n")
         self.assertEqual(len(rivit), 4)
     open('kinkeri.jsonlines', 'w').close()