Code example #1
import os

import cv2
import numpy as np
import torch
from natsort import natsorted
from torch import nn
from torch.nn import LeakyReLU

# SRCNN (the model definition) and CimConv2d (a custom convolution) are
# project-specific and assumed to be importable from the project's modules.


def main():
    model = SRCNN()
    path = './models/srcnn_Set7_8_32.ckpt'
    model.load_state_dict(torch.load(path))
    l1_weight = model.layer1[0].weight.data
    l2_weight = model.layer2[0].weight.data
    l3_weight = model.layer3[0].weight.data
    # Cast the weights to float64; calling torch.tensor() on an existing
    # tensor copies it and raises a warning, so use .double() instead.
    l1_weight = l1_weight.double()
    l2_weight = l2_weight.double()
    l3_weight = l3_weight.double()
    path = "./dataset/feature_map_layer1/"
    image = []
    for i in range(57):  # feature-map directories are named 0 .. 56
        image_LR_test = []
        path_LR_test = path + str(i) + '/'
        for filename in natsorted(os.listdir(path_LR_test)):
            img_LR_test = np.loadtxt(os.path.join(path_LR_test, filename),
                                     delimiter=' ')
            #img_LR_test = img_LR_test - np.min(img_LR_test)
            #img_LR_test = img_LR_test * (63 / np.max(img_LR_test))
            #img_LR_test = np.round(img_LR_test)
            #img_LR_test = np.where(img_LR_test > 63, 63, img_LR_test)
            #img_LR_test = img_LR_test[:, :, np.newaxis]
            if img_LR_test is not None:
                #img_LR_test = img_LR_test.transpose(2,0,1)
                image_LR_test.append(img_LR_test)
        image.append(image_LR_test)

    # Stacking assumes every loaded feature map has the same shape.
    image = torch.tensor(np.array(image))
    print(image[0][0])
    m = LeakyReLU(0.5)
    #out = CimConv2d(image_LR_test, l1_weight)
    out = image
    # Quantise the layer-1 feature maps: scale by 8 (3 fractional bits),
    # floor, apply LeakyReLU, then clip to the signed range [-63, 63].
    out = 8 * out
    out = torch.floor(out)
    out = m(out)
    out[out > 63] = 63
    out[out < -63] = -63
    out = CimConv2d(out, l2_weight)
    out = torch.floor(out)
    out = m(out)
    out = nn.functional.conv2d(out, l3_weight, bias=None, stride=1, padding=2)
    # Rescale the result to [0, 255] for writing out as an image.
    out_img = out.clone()
    out_img -= out_img.min()
    out_img *= (255 / out_img.max())

    img_HR = torch.round(out_img[30]).to('cpu').numpy().transpose(1, 2, 0)
    print(img_HR.shape)
    # cv2.imwrite expects 8-bit data for JPEG output.
    cv2.imwrite('rspi.jpg', img_HR.astype(np.uint8))
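
The scale-floor-clip sequence above is a small fixed-point quantisation (3 fractional bits, outputs clipped to a signed 7-bit range). A minimal standalone sketch of that step, with a hypothetical function name, under the assumption that this is the intended scheme:

import torch
from torch.nn import LeakyReLU


def quantize_activations(x, scale=8, limit=63):
    """Floor scale*x, apply LeakyReLU(0.5), and clip to [-limit, limit]."""
    out = LeakyReLU(0.5)(torch.floor(scale * x))
    return out.clamp(-limit, limit)


# Example: quantize_activations(torch.randn(1, 32, 8, 8))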
Code example #2
import functools

import torch
import torch.nn as nn
from torch.nn import LeakyReLU

# Conv and Bottleneck are the project's own building blocks and are assumed
# to be importable from its module definitions.


class BottleneckCSP2Leaky(nn.Module):
    def __init__(
        self,
        c1,
        c2,
        n=1,
        shortcut=False,
        g=1,
        e=0.5,
        activation_type='hardswish',
    ):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP2Leaky, self).__init__()
        Conv_ = functools.partial(Conv, activation_type=activation_type)
        Bottleneck_ = functools.partial(Bottleneck,
                                        activation_type=activation_type)
        c_ = int(c2)  # hidden channels
        self.cv1 = Conv_(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv3 = Conv_(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = LeakyReLU(negative_slope=0.1)
        self.m = nn.Sequential(
            *[Bottleneck_(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
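
Only the constructor appears in the snippet. In the CSP2 pattern this module follows (Scaled-YOLOv4's BottleneckCSP2), the forward pass feeds cv1 into both the bottleneck stack and the 1x1 shortcut, concatenates, then applies BN, the leaky activation, and cv3; a plausible forward under that assumption:

    def forward(self, x):
        x1 = self.cv1(x)
        return self.cv3(self.act(self.bn(
            torch.cat((self.m(x1), self.cv2(x1)), dim=1))))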
Code example #3
import functools

import torch
import torch.nn as nn
from torch.nn import LeakyReLU

# Conv is the project's convolution block, as in the previous example.


class SPPCSPLeaky(nn.Module):
    def __init__(self,
                 c1,
                 c2,
                 n=1,
                 shortcut=False,
                 g=1,
                 e=0.5,
                 k=(5, 9, 13),
                 activation_type='hardswish'):
        super(SPPCSPLeaky, self).__init__()
        Conv_ = functools.partial(Conv, activation_type=activation_type)
        c_ = int(2 * c2 * e)  # hidden channels
        self.cv1 = Conv_(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = Conv_(c_, c_, 3, 1)
        self.cv4 = Conv_(c_, c_, 1, 1)
        self.m = nn.ModuleList(
            [nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
        self.cv5 = Conv_(4 * c_, c_, 1, 1)
        self.cv6 = Conv_(c_, c_, 3, 1)
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = LeakyReLU(negative_slope=0.1)
        self.cv7 = Conv_(2 * c_, c2, 1, 1)
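
Again only the constructor is shown. The layer widths (cv5 consumes 4 * c_ channels, cv7 consumes 2 * c_) match Scaled-YOLOv4's SPPCSP, whose forward pools x1 at the three kernel sizes, concatenates, and merges with the cv2 shortcut; a plausible forward under that assumption:

    def forward(self, x):
        x1 = self.cv4(self.cv3(self.cv1(x)))
        y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
        y2 = self.cv2(x)
        return self.cv7(self.act(self.bn(torch.cat((y1, y2), dim=1))))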
Code example #4
def make_default_config(dataset_name):
    timefeat = TimeFeatType.timefeat
    (
        n_timefeat,
        n_staticfeat,
        n_latent,
        freq,
        cardinalities,
        prediction_length_rolling,
        prediction_length_full,
    ) = get_n_feat_and_freq(dataset_name=dataset_name, timefeat=timefeat)
    assert len(cardinalities["cardinalities_feat_static_cat"]) == 1
    n_static_embedding = min(
        50, (cardinalities["cardinalities_feat_static_cat"][0] + 1) // 2
    )
    n_ctrl_all = n_ctrl_static = n_ctrl_dynamic = 64

    # n_ctrl_static = n_static_embedding
    # n_ctrl_dynamic = 32
    # n_ctrl_all = n_ctrl_static + n_ctrl_dynamic  # we cat

    dims = TensorDims(
        timesteps=past_lengths[dataset_name],
        particle=10,
        batch=50,
        state=n_latent,
        target=1,
        switch=5,
        # ctrl_state=None,
        # ctrl_switch=n_staticfeat + n_timefeat,
        # ctrl_obs=n_staticfeat + n_timefeat,
        ctrl_state=n_ctrl_dynamic,
        ctrl_target=n_ctrl_static,
        ctrl_switch=n_ctrl_all,  # switch takes cat feats
        ctrl_encoder=n_ctrl_all,  # encoder takes cat feats
        timefeat=n_timefeat,
        staticfeat=n_staticfeat,
        cat_embedding=n_static_embedding,
        auxiliary=None,
    )

    config = RsglsIssmGtsExpConfig(
        experiment_name="rsgls",
        dataset_name=dataset_name,
        #
        n_epochs=50,
        n_epochs_no_resampling=5,
        n_epochs_freeze_gls_params=1,
        n_epochs_until_validate_loss=1,
        lr=5e-3,
        weight_decay=1e-5,
        grad_clip_norm=10.0,
        num_samples_eval=100,
        batch_size_val=100,  # 10
        # gpus=tuple(range(3, 4)),
        # dtype=torch.float64,
        # architecture, prior, etc.
        state_prior_scale=1.0,
        state_prior_loc=0.0,
        make_cov_from_cholesky_avg=True,
        extract_tail_chunks_for_train=False,
        switch_link_type=SwitchLinkType.individual,
        switch_link_dims_hidden=(64,),
        switch_link_activations=LeakyReLU(0.1, inplace=True),
        recurrent_link_type=SwitchLinkType.individual,
        is_recurrent=True,
        n_base_A=20,
        n_base_B=20,
        n_base_C=20,
        n_base_D=20,
        n_base_Q=20,
        n_base_R=20,
        n_base_F=20,
        n_base_S=20,
        requires_grad_R=True,
        requires_grad_Q=True,
        requires_grad_S=True,
        # obs_to_switch_encoder=True,
        # state_to_switch_encoder=False,
        switch_prior_model_dims=tuple(),
        # TODO: made assumption that this is used for ctrl_state...
        input_transform_dims=(64,) + (dims.ctrl_state,),
        switch_transition_model_dims=(64,),
        # state_to_switch_encoder_dims=(64,),
        obs_to_switch_encoder_dims=(64,),
        b_fn_dims=tuple(),
        d_fn_dims=tuple(),  # (64,),
        switch_prior_model_activations=LeakyReLU(0.1, inplace=True),
        input_transform_activations=LeakyReLU(0.1, inplace=True),
        switch_transition_model_activations=LeakyReLU(0.1, inplace=True),
        # state_to_switch_encoder_activations=LeakyReLU(0.1, inplace=True),
        obs_to_switch_encoder_activations=LeakyReLU(0.1, inplace=True),
        b_fn_activations=LeakyReLU(0.1, inplace=True),
        d_fn_activations=LeakyReLU(0.1, inplace=True),
        # initialisation
        init_scale_A=0.95,
        init_scale_B=0.0,
        init_scale_C=None,
        init_scale_D=0.0,
        init_scale_R_diag=[1e-5, 1e-1],
        init_scale_Q_diag=[1e-4, 1e0],
        init_scale_S_diag=[1e-5, 1e-1],
        # set from outside due to dependencies.
        dims=dims,
        freq=freq,
        time_feat=timefeat,
        add_trend=add_trend_map[dataset_name],
        prediction_length_rolling=prediction_length_rolling,
        prediction_length_full=prediction_length_full,
        normalisation_params=normalisation_params[dataset_name],
        LRinv_logdiag_scaling=1.0,
        LQinv_logdiag_scaling=1.0,
        A_scaling=1.0,
        B_scaling=1.0,
        C_scaling=1.0,
        D_scaling=1.0,
        LSinv_logdiag_scaling=1.0,
        F_scaling=1.0,
        eye_init_A=True,
    )
    return config
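
A hypothetical invocation: the function relies on module-level names assumed to be imported in the surrounding experiment module (TensorDims, RsglsIssmGtsExpConfig, get_n_feat_and_freq, past_lengths, add_trend_map, normalisation_params), and the dataset name below is one of those used in the next example:

config = make_default_config(dataset_name="exchange_rate_nips")
print(config.dims.state, config.lr)  # inspect a few derived settings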
Code example #5
def make_default_config(dataset_name):
    timefeat = TimeFeatType.timefeat
    (
        n_timefeat,
        n_staticfeat,
        n_latent,
        freq,
        cardinalities,
        prediction_length_rolling,
        prediction_length_full,
    ) = get_n_feat_and_freq(dataset_name=dataset_name, timefeat=timefeat)
    assert len(cardinalities["cardinalities_feat_static_cat"]) == 1
    n_static_embedding = min(
        50, (cardinalities["cardinalities_feat_static_cat"][0] + 1) // 2)
    n_ctrl_all = n_ctrl_static = n_ctrl_dynamic = 64

    # n_ctrl_static = n_static_embedding
    # n_ctrl_dynamic = 32
    # n_ctrl_all = n_ctrl_static + n_ctrl_dynamic  # we cat

    dims = TensorDims(
        timesteps=past_lengths[dataset_name],
        particle=1,
        batch=50,
        state=16,  # n_latent,
        target=1,
        switch=None,
        ctrl_state=n_ctrl_dynamic,
        ctrl_target=n_ctrl_static,
        ctrl_switch=n_ctrl_all,
        ctrl_encoder=None,  # KVAE uses pseudo-obs only, no controls for enc.
        timefeat=n_timefeat,
        staticfeat=n_staticfeat,
        cat_embedding=n_static_embedding,
        auxiliary=5,
    )

    config = KvaeGtsExpConfig(
        experiment_name="kvae",
        dataset_name=dataset_name,
        #
        n_epochs=50,
        n_epochs_until_validate_loss=1,
        lr=5e-3,
        weight_decay=1e-5,
        grad_clip_norm=10.0,
        num_samples_eval=100,
        # Note: These batch sizes barely fit on the GPU.
        batch_size_val=10 if dataset_name
        in ["exchange_rate_nips", "wiki2000_nips"] else 2,
        # architecture, prior, etc.
        state_prior_scale=1.0,
        state_prior_loc=0.0,
        make_cov_from_cholesky_avg=True,
        extract_tail_chunks_for_train=False,
        switch_link_type=SwitchLinkType.shared,
        switch_link_dims_hidden=tuple(),  # linear used in KVAE LSTM -> alpha
        switch_link_activations=tuple(),
        # they have 1 Dense layer after LSTM.
        recurrent_link_type=SwitchLinkType.shared,
        n_hidden_rnn=50,
        rao_blackwellized=True,
        reconstruction_weight=1.0,  # They use 0.3 w/o rao-BW.
        dims_encoder=(64, 64),
        dims_decoder=(64, 64),
        activations_encoder=LeakyReLU(0.1, inplace=True),
        activations_decoder=LeakyReLU(0.1, inplace=True),
        n_base_A=20,
        n_base_B=20,
        n_base_C=20,
        n_base_D=None,  # KVAE does not have D
        n_base_Q=20,
        n_base_R=20,
        n_base_F=None,
        n_base_S=None,
        requires_grad_R=True,
        requires_grad_Q=True,
        requires_grad_S=None,
        input_transform_dims=tuple() + (dims.ctrl_state,),
        input_transform_activations=LeakyReLU(0.1, inplace=True),
        # initialisation
        init_scale_A=0.95,
        init_scale_B=0.0,
        init_scale_C=None,
        init_scale_D=None,
        init_scale_R_diag=[1e-4, 1e-1],
        init_scale_Q_diag=[1e-4, 1e-1],
        init_scale_S_diag=None,
        # init_scale_S_diag=[1e-5, 1e0],
        # set from outside due to dependencies.
        dims=dims,
        freq=freq,
        time_feat=timefeat,
        add_trend=add_trend_map[dataset_name],
        prediction_length_rolling=prediction_length_rolling,
        prediction_length_full=prediction_length_full,
        normalisation_params=normalisation_params[dataset_name],
        LRinv_logdiag_scaling=1.0,
        LQinv_logdiag_scaling=1.0,
        A_scaling=1.0,
        B_scaling=1.0,
        C_scaling=1.0,
        D_scaling=1.0,
        LSinv_logdiag_scaling=1.0,
        F_scaling=1.0,
        eye_init_A=True,
    )
    return config
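
A hypothetical invocation, mirroring the previous example (this make_default_config lives in a different experiment module); the print illustrates two settings that differ from the RSGLS config, since KVAE has no D base matrices and models 5 auxiliary pseudo-observations:

config = make_default_config(dataset_name="wiki2000_nips")
print(config.n_base_D, config.dims.auxiliary)  # None 5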
Code example #6
def train_valid_base_text_decision_fix_text_features_model(
        model_name: str,
        single_round_label: bool,
        use_only_prev_round: bool,
        train_data_file_name: str,
        validation_data_file_name: str,
        no_history: bool = False,
        func_batch_size: int = 9,
        numbers_columns: list = None,
        add_numeric_data: bool = True):
    """
    This function train and validate model that use fix texts features only.
    :param: model_name: the full model name
    :param single_round_label: the label to use: single round of total payoff
    :param use_only_prev_round: if to use all the history or only the previous round
    :param train_data_file_name: the name of the train_data to use
    :param validation_data_file_name: the name of the validation_data to use
    :param no_history: if we don't want to use any history data
    :param func_batch_size: the batch size to use
    :param model_name: the name of the model we run
    :param numbers_columns: the names of the columns to use for the numeric data
    :param add_numeric_data: if we want to add numbers data
    :return:
    """

    reader = TextExpDataSetReader(add_numeric_data=add_numeric_data,
                                  use_only_prev_round=use_only_prev_round,
                                  single_round_label=single_round_label,
                                  three_losses=True,
                                  fix_text_features=True,
                                  no_history=no_history,
                                  numbers_columns_name=numbers_columns)
    train_data_file_inner_path = os.path.join(data_directory,
                                              train_data_file_name)
    validation_data_file_inner_path = os.path.join(data_directory,
                                                   validation_data_file_name)
    train_instances = reader.read(train_data_file_inner_path)
    validation_instances = reader.read(validation_data_file_inner_path)
    vocab = Vocabulary()

    # TODO: change this if necessary
    # batch_size should be 10 or 9, depending on the input,
    # and do not shuffle, so all the data of the same pair lands in the same batch
    iterator = BasicIterator(
        batch_size=func_batch_size)  # , instances_per_epoch=10)
    #  sorting_keys=[('sequence_review', 'list_num_tokens')])
    iterator.index_with(vocab)

    # the shape of the flattened data representation
    if 'bert' in train_data_file_name:  # fixed features are BERT vectors
        text_feedforward = FeedForward(input_dim=reader.max_tokens_len,
                                       num_layers=2,
                                       hidden_dims=[300, 50],
                                       activations=ReLU(),
                                       dropout=[0.0, 0.0])
        reader.max_tokens_len = 50
    else:
        text_feedforward = None
    feed_forward_input_dim = reader.max_seq_len * (reader.max_tokens_len +
                                                   reader.number_length)
    feed_forward_classification = FeedForward(input_dim=feed_forward_input_dim,
                                              num_layers=1,
                                              hidden_dims=[2],
                                              activations=LeakyReLU(),
                                              dropout=[0.3])
    criterion_classification = nn.BCEWithLogitsLoss()

    metrics_dict = {
        'Accuracy': CategoricalAccuracy()  # BooleanAccuracy(),
        # 'auc': Auc(),
        # 'F1measure': F1Measure(positive_label=1),
    }

    model = models.BasicFixTextFeaturesDecisionModel(
        vocab=vocab,
        classifier_feedforward_classification=feed_forward_classification,
        criterion_classification=criterion_classification,
        metrics_dict=metrics_dict,
        max_tokens_len=reader.max_tokens_len,
        text_feedforward=text_feedforward,
        regularizer=RegularizerApplicator([("", L1Regularizer())]),
    )

    optimizer = optim.Adam(model.parameters(), lr=0.1)
    num_epochs = 100

    run_log_directory = utils.set_folder(
        datetime.now().strftime(
            f'{model_name}_{num_epochs}_epochs_%d_%m_%Y_%H_%M_%S'), 'logs')

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        iterator=iterator,
        train_dataset=train_instances,
        validation_dataset=validation_instances,
        num_epochs=num_epochs,
        shuffle=False,
        serialization_dir=run_log_directory,
        patience=10,
        histogram_interval=10,
    )

    model_dict = trainer.train()

    print(f'{model_name}: evaluation measures are:')
    for key, value in model_dict.items():
        if 'accuracy' in key:
            value = value * 100
        print(f'{key}: {value}')

    # save the model predictions
    model.predictions.to_csv(os.path.join(run_log_directory,
                                          'predictions.csv'))
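
A hypothetical invocation with placeholder file names (the actual files live under data_directory and are not shown in the snippet):

train_valid_base_text_decision_fix_text_features_model(
    model_name='basic_fix_text_features_decision_model',
    single_round_label=True,
    use_only_prev_round=False,
    train_data_file_name='train_data.csv',  # placeholder
    validation_data_file_name='validation_data.csv',  # placeholder
)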