Example #1
    def __init__(self, epochs: int = 10, lr: float = 1, coef: float = -2):
        super().__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.epochs = epochs
        self.lr = lr
        self.coef = coef
        self.model = ChildNet().to(self.device)
        self.loss = MSELoss()
        self.optimizer = Adam(params=self.model.parameters(), lr=lr)
Example #2
    def __init__(self, sizes, nonlin='tanh', device='cpu'):
        super(NNRegressor, self).__init__()

        if device == 'gpu' and torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')

        self.sizes = sizes

        # Entries must be unary callables to be usable as self.nonlin; note that
        # nn.Softplus is a module class and is instantiated here, while
        # torch.log_softmax and func.linear require extra arguments, so
        # relu/tanh/splus are the safe choices for this lookup.
        nlist = dict(relu=torch.relu,
                     tanh=torch.tanh,
                     splus=nn.Softplus(),
                     softmax=torch.log_softmax,
                     linear=func.linear)

        self.nonlin = nlist[nonlin]
        self.l1 = nn.Linear(self.sizes[0], self.sizes[1]).to(self.device)
        self.l2 = nn.Linear(self.sizes[1], self.sizes[2]).to(self.device)
        self.output = nn.Linear(self.sizes[2], self.sizes[3]).to(self.device)

        self.criterion = MSELoss().to(self.device)
        self.optim = None

        self.target_size = self.sizes[-1]
        self.input_size = self.sizes[0]

        self.input_trans = None
        self.target_trans = None
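
The class above never defines forward; a minimal sketch of how self.nonlin and the three layers would typically compose (the method below is an assumption for illustration, not part of the source):

    def forward(self, x):
        # Two hidden layers with the selected nonlinearity, then a linear head.
        # This only works for the unary entries of nlist (relu/tanh/splus).
        x = self.nonlin(self.l1(x))
        x = self.nonlin(self.l2(x))
        return self.output(x)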
Example #3
def run_determinant(in_features, n_iterations=100, size=100):
    coder = DetNet(in_features)
    dataset = [FloatTensor(make_spd_matrix(in_features)).unsqueeze(0) for _ in range(size)]

    dataset = torch.cat(dataset, 0)  # Variable is deprecated since PyTorch 0.4

    test_size = 30

    train_dataset = dataset[:-test_size]
    test_dataset = dataset[-test_size:]

    optimizer = optim.Adam(coder.parameters(), lr=0.1)
    criterion = MSELoss()

    for epoch in tqdm(range(1, n_iterations)):
        optimizer.zero_grad()

        outputs_train = coder(train_dataset)
        outputs_test = coder(test_dataset)

        loss_train = criterion(outputs_train, train_dataset)
        loss_test = criterion(outputs_test, test_dataset)

        loss_train.backward(retain_graph=True)
        if epoch % 10 == 0:
            print("EPOCH: {0}, TRAIN LOSS: {1}, TEST LOSS: {2}".format(
                epoch, loss_train.item(), loss_test.item()))
        optimizer.step()


    return coder, dataset
Example #4
class Translator(BasicFamilyReg):
    def __init__(self, epochs: int = 10, lr: float = 1, coef: float = -2):
        super().__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.epochs = epochs
        self.lr = lr
        self.coef = coef
        self.model = ChildNet().to(self.device)
        self.loss = MSELoss()
        self.optimizer = Adam(params=self.model.parameters(), lr=lr)

    def fit(self, X_fathers, X_mothers, y_child):
        # X = torch.from_numpy(np.concatenate([X_fathers, X_mothers], axis=-1)).float().to(self.device)
        X_fathers = np2torch(X_fathers)
        X_mothers = np2torch(X_mothers)
        y = np2torch(y_child)
        for epoch in range(self.epochs):
            self.optimizer.zero_grad()

            output = self.model(X_fathers, X_mothers)
            loss = self.loss(output, y)  # call the module, not .forward(), so hooks run

            loss.backward()
            self.optimizer.step()
            print("loss", loss)

    def predict(self, X_fathers, X_mothers):
        X_fathers = np2torch(X_fathers)
        X_mothers = np2torch(X_mothers)
        with torch.no_grad():
            y_pred = self.model(X_fathers, X_mothers)
        # y_pred = y_pred + self.coef * np2torch(config.age_kid_direction)
        return self.add_random_gender(y_pred)
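
A hedged usage sketch for Translator; np2torch, ChildNet, and add_random_gender come from the surrounding project, and the feature dimensions below are invented for illustration only:

import numpy as np

fathers = np.random.rand(32, 8).astype(np.float32)
mothers = np.random.rand(32, 8).astype(np.float32)
children = np.random.rand(32, 8).astype(np.float32)

translator = Translator(epochs=10, lr=1e-2)
translator.fit(fathers, mothers, children)   # prints the loss each epoch
preds = translator.predict(fathers, mothers)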
Example #5
    def __init__(self, env):
        self.env = env
        action_shape = env.action_space.n
        self.action_embed = ActionEmbedding(env)
        self.rand_net = AtariNoveltyNet((210, 160), action_shape)
        self.dist_net = AtariNoveltyNet((210, 160), action_shape)
        self.optimizer = Adam(lr=1e-3, params=self.dist_net.parameters())
        self.criterion = MSELoss()
        self.device = 'cpu'
Example #6
def get_lossfn(params, encoder, data):
    if params.loss == "mse":
        return MSELoss()
    elif params.loss == "cosine":
        return CosineLoss()
    elif params.loss == "ce":
        return CrossEntropyLoss(ignore_index=0)  # ignore padding symbol
    elif params.loss == "fliploss":
        if params.baseloss == "cosine":
            baseloss = CosineLoss()
        elif params.baseloss == "mse":
            baseloss = MSELoss()
        else:
            raise ValueError(f"Unknown base loss {params.baseloss}.")

        bclf = train_binary_classifier(data['Sx'], data['Sy'], encoder, params)
        params.latent_binary_classifier = bclf
        return FlipLoss(baseloss, bclf, lambda_clfloss=params.lambda_clfloss)
    else:
        raise ValueError(f"Unknown loss {params.loss}.")
Example #7
def ratio_mse_den(log_p_pred,
                  log_r_pred,
                  t_pred,
                  y_true,
                  r_true,
                  t_true,
                  log_r_clip=10.):
    r_true = torch.clamp(r_true, np.exp(-log_r_clip), np.exp(log_r_clip))
    r_pred = torch.exp(torch.clamp(log_r_pred, -log_r_clip, log_r_clip))
    return MSELoss()(y_true * r_pred, y_true * r_true)
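
The clamping keeps exp() finite: the predicted log-ratio is cut to [-log_r_clip, log_r_clip] before exponentiating, and the true ratio is cut to the matching range. A quick standalone check (the input values are arbitrary):

import torch

log_r_clip = 10.
log_r_pred = torch.tensor([-50., 0., 50.])
r_pred = torch.exp(torch.clamp(log_r_pred, -log_r_clip, log_r_clip))
print(r_pred)  # tensor([4.5400e-05, 1.0000e+00, 2.2026e+04]) -- bounded by exp(+/-10)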
Example #8
def _loss(input_x, output_x, lat_mu, lat_var) -> torch.Tensor:
    mse_loss = MSELoss()(input_x, output_x)

    kl_loss = 0.5 * torch.sum(
        torch.exp(lat_var) + lat_mu.pow(2) - 1.0 - lat_var)

    print('mse_loss ' + str(mse_loss.item()))
    print('kl_loss ' + str(kl_loss.item()))

    return mse_loss + kl_loss
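
The kl_loss above is the closed form of KL(N(lat_mu, exp(lat_var)) || N(0, I)) with lat_var read as a log-variance; a quick check against torch.distributions (tensor shapes are arbitrary):

import torch
from torch.distributions import Normal, kl_divergence

lat_mu, lat_var = torch.randn(4, 3), torch.randn(4, 3)
q = Normal(lat_mu, torch.exp(0.5 * lat_var))     # scale = sigma = exp(logvar / 2)
p = Normal(torch.zeros(4, 3), torch.ones(4, 3))
closed = 0.5 * torch.sum(torch.exp(lat_var) + lat_mu.pow(2) - 1.0 - lat_var)
print(torch.allclose(closed, kl_divergence(q, p).sum(), atol=1e-5))  # True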
Example #9
def ratio_mse_num(log_p_pred,
                  log_r_pred,
                  t_pred,
                  y_true,
                  r_true,
                  t_true,
                  log_r_clip=10.):
    r_true = torch.clamp(r_true, np.exp(-log_r_clip), np.exp(log_r_clip))
    r_pred = torch.exp(torch.clamp(log_r_pred, -log_r_clip, log_r_clip))
    return MSELoss()((1. - y_true) * (1. / r_pred),
                     (1. - y_true) * (1. / r_true))
Example #10
    def __init__(self, model):
        '''Wrap a multi-task model with trainable per-task loss weights.'''

        # initialize the module using super() constructor
        super(RegressionTrain, self).__init__()
        # assign the architectures
        self.model = model
        # assign the weights for each task
        self.weights = torch.nn.Parameter(torch.ones(model.n_tasks).float())
        # loss function
        self.mse_loss = MSELoss()
Example #11
    def __init__(self, state_dim, action_dim, learning_rate, tau, gamma):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.tau = tau
        self.gamma = gamma

        self.critic_network = self.create_critic_network()
        self.target_critic_network = self.create_critic_network()

        self.optimizer = optim.Adam(self.critic_network.parameters(),
                                    lr=learning_rate)
        self.loss = MSELoss()
Example #12
def _loss(input_x,
          output_x,
          lat_mu,
          lat_var,
          real_one_hot,
          gen_one_hot,
          show_partial=False) -> torch.Tensor:
    mse_loss = MSELoss()(input_x, output_x)

    kld_loss = torch.mean(
        -0.5 * torch.sum(1 + lat_var - lat_mu**2 - lat_var.exp(), dim=1),
        dim=0)

    # TODO: compute the KL divergence properly
    # synthetic data

    deemb_loss = MSELoss()(real_one_hot, gen_one_hot)

    # return deemb_loss

    if show_partial:
        print('\tmse_loss ' + str(mse_loss.item()))
        print('\tkld_loss ' + str(kld_loss.item()))
        print('\tdeemb_loss ' + str(deemb_loss.item()))

    return mse_loss + kld_loss * 0.0001 + deemb_loss
Example #13
def run_test_mutag(in_features, n_iterations=2000):
    coder = MatrixEncoder(in_features)
    dataset = dt.build_dataset(dt.read_test_graphs(), nodes_number=in_features)[0]
    dataset = [dt.adjacency_tensor(x).unsqueeze(0) for x in dataset]
    shuffle(dataset)
    dataset = torch.cat(dataset, 0)  # Variable is deprecated since PyTorch 0.4

    test_size = 30

    train_dataset = dataset[:-test_size]
    test_dataset = dataset[-test_size:]

    optimizer = optim.Adam(coder.parameters(), lr=3e-1)
    criterion = MSELoss()

    loss_data = []

    for epoch in tqdm(range(1, n_iterations)):
        optimizer.zero_grad()

        outputs_train = coder(train_dataset)
        outputs_test = coder(test_dataset)

        loss_train = criterion(outputs_train, train_dataset)
        loss_test = criterion(outputs_test, test_dataset)

        loss_train.backward(retain_graph=True)
        if epoch % 10 == 0:
            print("EPOCH: {0}, TRAIN LOSS: {1}, TEST LOSS".format(epoch, loss_train.data[0]), float(loss_test.data[0]))

        if epoch == 1000:
            # The original used == (a comparison, not an assignment), and mutating
            # a state_dict() copy has no effect; update param_groups in place.
            for group in optimizer.param_groups:
                group['lr'] *= 0.1

        loss_data.append((loss_train.item(), loss_test.item()))

        optimizer.step()

    fig = plt.figure()
    loss_data = np.array(loss_data)
    plt.plot(range(len(loss_data[20:])), loss_data[20:, 0], label='train', c='r')
    plt.plot(range(len(loss_data[20:])), loss_data[20:, 1], label='test', c='b')
    plt.title("MSELoss per epoch for train/test")
    plt.xlabel('epoch')
    plt.ylabel('MSELoss')
    plt.grid()
    plt.legend()
    plt.show()

    fig.savefig('loss.pdf', format='pdf')

    return coder, dataset, loss_data
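
The in-place decay at epoch 1000 can equivalently be expressed with a scheduler; a minimal sketch using torch.optim.lr_scheduler.StepLR:

from torch.optim.lr_scheduler import StepLR

scheduler = StepLR(optimizer, step_size=1000, gamma=0.1)  # lr *= 0.1 every 1000 steps
# ...inside the training loop, after optimizer.step():
#     scheduler.step()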
Example #14
def train(player_model, player_idx, states, actions, is_winner):
    # Get rewards for each state
    rewards = [
        REWARD_SIZE * (1 - EPSILON_DECAY)**i for i in range(len(states))
    ][::-1]
    if not is_winner:
        rewards = [-r for r in rewards]

    # print("Rewards:", rewards)

    # Get model predictions
    cnn_inputs = [grid_to_cnn_input(s + 1, NUM_PLAYERS + 1) for s in states]
    cnn_inputs = torch.cat(cnn_inputs)
    cnn_outputs = player_model(cnn_inputs)

    # Compute pseudo-ground truth
    cnn_gt = []
    out = cnn_outputs.detach().numpy()
    for s, r, a, y in zip(states, rewards, actions, out):
        gt = np.where(s == -1, y, 0)  # empty space
        gt.ravel()[a] = y.ravel()[a] + r  # taken action
        gt = gt.clip(0, 1, out=gt)
        cnn_gt.append(gt)

    # Create tensors
    cnn_gt = torch.tensor(np.array(cnn_gt))  # stack first; tensor() from a list of arrays is slow

    # Compute loss
    loss = MSELoss()(cnn_outputs, cnn_gt)
    print(loss)

    # Backprop gradients
    player_model.zero_grad()
    loss.backward()

    # Update weights (note: a fresh Adam is built on every call, which discards
    # its running moment estimates; constructing it once would be the usual pattern)
    optimizer = Adam(player_model.parameters(), lr=LEARNING_RATE)
    optimizer.step()
Example #15
    def __init__(self, name, config):
        super().__init__(name)
        self.loss: nn.Module
        self.mode = config.mode
        if config.mode == 'regression':
            self.loss = MSELoss()
        elif config.mode == 'classification':
            self.loss = CrossEntropyLoss()
        else:
            raise ValueError(
                f'config.mode must be in [regression, classification] but was {config.mode}'
            )

        self.head = FinetuneHead(config)
Example #16
    def __init__(self, log, tb_writer, args):
        super().__init__()  # super(self.__class__, ...) breaks under subclassing
        self.log = log
        self.tb_writer = tb_writer
        self.args = args
        self.loss_fn = MSELoss()

        self.net = OmniglotNet(self.loss_fn, args).to(device)

        self.fast_net = InnerLoop(self.loss_fn, args).to(device)

        self.opt = Adam(self.net.parameters(), lr=args.meta_lr)
        self.sampler = BatchSampler(args)
        self.memory = ReplayBuffer()
Example #17
    def __init__(self, policy_value_model=None):
        # create the model
        if policy_value_model is None:
            self.model = resnet_policy_value_model()  # torch_policy_value_model()
        else:
            self.model = policy_value_model
        learning_rate = 1e-3

        self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)

        self.policy_loss = nn.PoissonNLLLoss()
        # self.policy_loss = MSELoss()
        # self.policy_loss = nn.CrossEntropyLoss()
        self.value_loss = MSELoss()
Example #18
    def __init__(self, sizes, bandwidth):
        super(FourierRegressor, self).__init__()

        self.bandwidth = bandwidth

        self.sizes = sizes

        self.target_size = self.sizes[-1]
        self.input_size = self.sizes[0]
        self.hidden_size = self.sizes[1]

        self.basis = FourierFeatures(self.sizes, self.bandwidth)
        self.output = nn.Linear(self.hidden_size, self.target_size)

        self.criterion = MSELoss()
        self.optim = None

        self.input_trans = None
        self.target_trans = None
Example #19
    def __init__(
        self,
        policy_thunk,
        value_fn_thunk,
        experience_replay,
        policy_lr,
        value_fn_lr,
        gamma,
        polyak,
        learning_start,
        steps_per_train,
        batch_size,
        l2_coeff,
        max_timesteps,
    ):
        self.online_policy = policy_thunk()
        self.online_policy.cuda()
        self.target_policy = policy_thunk()
        self.target_policy.cuda()
        self.target_policy.load_state_dict(self.online_policy.state_dict())

        self.online_value_fn = value_fn_thunk()
        self.online_value_fn.cuda()
        self.target_value_fn = value_fn_thunk()
        self.target_value_fn.cuda()
        self.target_value_fn.load_state_dict(self.online_value_fn.state_dict())

        self.experience_replay = experience_replay
        self.gamma = gamma
        self.polyak = polyak
        self.total_steps = 0

        self.policy_opt = optim.Adam(self.online_policy.parameters(),
                                     policy_lr,
                                     weight_decay=l2_coeff)
        self.value_fn_opt = optim.Adam(self.online_value_fn.parameters(),
                                       value_fn_lr,
                                       weight_decay=l2_coeff)
        self.value_fn_criterion = MSELoss()
        self.learning_start = learning_start
        self.steps_per_train = steps_per_train
        self.batch_size = batch_size
        self.max_timesteps = max_timesteps
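
The constructor stores polyak but the corresponding target update is not shown; a common soft-update sketch (an assumption about the missing method, not the source's code):

    def _polyak_update(self, online, target):
        # theta_target <- polyak * theta_target + (1 - polyak) * theta_online
        with torch.no_grad():
            for p, p_targ in zip(online.parameters(), target.parameters()):
                p_targ.mul_(self.polyak).add_((1.0 - self.polyak) * p)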
Example #20
def run_test(in_features, n_iterations=100, size=500):

    coder = MatrixEncoder(in_features)
    dataset = [FloatTensor(10*make_spd_matrix(in_features)).unsqueeze(0) for _ in range(size)]

    dataset = torch.cat(dataset, 0)  # Variable is deprecated since PyTorch 0.4

    test_size = 10

    train_dataset = dataset[:-test_size]
    test_dataset = dataset[-test_size:]

    optimizer = optim.Adam(coder.parameters(), lr=0.1)
    criterion = MSELoss()

    loss_data = []

    for epoch in tqdm(range(1, n_iterations)):
        optimizer.zero_grad()

        outputs_train = coder(train_dataset)
        outputs_test = coder(test_dataset)

        loss_train = criterion(outputs_train, train_dataset)
        loss_test = criterion(outputs_test, test_dataset)

        loss_train.backward(retain_graph=True)
        if epoch % 10 == 0:
            print("EPOCH: {0}, TRAIN LOSS: {1}, TEST LOSS: {2}".format(
                epoch, loss_train.item(), loss_test.item()))

        loss_data.append((loss_train.item(), loss_test.item()))

        optimizer.step()

    loss_data = np.array(loss_data)
    plt.plot(range(len(loss_data[20:])), loss_data[20:, 0], label='train')
    plt.plot(range(len(loss_data[20:])), loss_data[20:, 1], label='test')
    plt.legend()
    plt.show()

    return coder, dataset
Example #21
def train_rae(retrain=False):
    # Recurrent Autoencoder
    optimizer_parameters = {
        "lr": 0.001,
    }
    criterion = MSELoss(reduction="sum")
    optimizer = Adam(word_rae.parameters(), **optimizer_parameters)
    mt = CriterionTrainer(
        criterion=criterion,
        model=word_rae,
        optimizer=optimizer,
        batch_size=batch_size,
        max_epochs=max_epochs,
        training_data=train_data,
        validation_data=validation_data,
        clip_max_norm=0.15,
    )
    mt.model_name = "WordRAE"
    if not retrain:
        mt.restore_checkpoint()
    mt.train(progress_bar='epoch')
Example #22
    def __init__(self, configuration, learning_rate=1e-3, load_model=None):
        # load any saved models
        # Number of Columns on the Board.
        self.columns = configuration.columns
        # Number of Rows on the Board.
        self.rows = configuration.rows
        # Number of Checkers "in a row" needed to win.
        self.inarow = configuration.inarow

        if load_model is None:
            self.model = ResNetModel(self.columns, self.rows)
        else:
            self.model = load_model

        self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)

        self.policy_loss = nn.KLDivLoss()
        #self.policy_loss = nn.PoissonNLLLoss()
        # self.policy_loss = MSELoss()
        # self.policy_loss = nn.CrossEntropyLoss()
        self.value_loss = MSELoss()
Example #23
    def fit(self, points, features, scores, n_epoch=2000, lr=5e-3):
        datasets = []
        optimizer = optim.Adam(params=self.net.parameters(), lr=lr)
        mseLoss = MSELoss()
        for p, f, s in zip(points, features, scores):
            one_set = TensorDataset(to_tensor(p), to_tensor(f), to_tensor(s))
            datasets.append(one_set)
        train_loader = DataLoader(ConcatDataset(datasets[:-1]),
                                  batch_size=1,
                                  shuffle=True)
        test_loader = DataLoader(ConcatDataset(datasets[-1:]),
                                 batch_size=1,
                                 shuffle=False)
        for e in range(n_epoch):
            print(f"Epoch: {e}")
            for i, (p, f, s) in enumerate(train_loader):
                out = self.net(p, f)
                loss = mseLoss(out, s)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            sum_loss = 0
            with torch.no_grad():
                for i, (p, f, s) in enumerate(train_loader):
                    out = self.net(p, f)
                    loss = mseLoss(out, s)
                    sum_loss += loss
            sum_loss /= len(train_loader)
            print(f"Train Loss: {sum_loss.item()}")

            sum_loss = 0
            with torch.no_grad():
                for i, (p, f, s) in enumerate(test_loader):
                    out = self.net(p, f)
                    loss = mseLoss(out, s)
                    sum_loss += loss
            sum_loss /= len(test_loader)
            print(f"Test Loss: {sum_loss.item()}")
Example #24
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        outputs = self.albert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)
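
The num_labels == 1 branch treats the task as regression, everything else as classification; a standalone sketch of both loss paths with dummy tensors (shapes are arbitrary):

import torch
from torch.nn import CrossEntropyLoss, MSELoss

batch, num_labels = 8, 3
logits = torch.randn(batch, num_labels)
labels = torch.randint(num_labels, (batch,))
print(CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1)))

logits = torch.randn(batch, 1)                 # regression head
targets = torch.randn(batch)
print(MSELoss()(logits.view(-1), targets.view(-1)))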
Example #25
def create_setting_pretraindcae(model=None):
    """
    This part is fixed for pretrain DCAE for mnist from paper Deep one-class classification setting.
    adam are used in paper.
    """
    setting = {}
    if cfg.pretrain_solver == 'adam':
        optimizer = torch.optim.Adam(params=model.parameters(), lr=cfg.pretrain_lr)
    elif cfg.pretrain_solver == 'sgd':
        optimizer = torch.optim.SGD(lr=cfg.pretrain_lr, momentum=cfg.pretrain_momentum, nesterov=True, params=model.parameters())
    else:
        raise ValueError('invalid pretrain solver for using: {}'.format(cfg.pretrain_solver))
    setting['optim'] = optimizer

    if cfg.ae_loss == 'l2':
        #from loss import MSEReconstructionError
        #loss = MSEReconstructionError()
        print('using MSE')
        loss = MSELoss(reduction='none')
    elif cfg.ae_loss == 'ce':
        loss = BCELoss()
    else:
        raise ValueError('invalid ae loss for using: {}'.format(cfg.ae_loss))

    setting['criterion'] = loss
    return setting
Example #26
    def __init__(self,
                 gamma,
                 epsilon_start,
                 epsilon_end,
                 epsilon_decay,
                 alpha,
                 target_update,
                 max_iter,
                 tau,
                 batch_size=16,
                 dropout_ratio=0.25):
        self.gamma = gamma
        self.epsilon_start = epsilon_start
        self.epsilon_end = epsilon_end
        self.epsilon_decay = epsilon_decay
        self.alpha = alpha
        self.target_update = target_update
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.dropout_ratio = dropout_ratio
        self.tau = tau
        self.tag = "g" + str(self.gamma) + "e" + str(self.epsilon_decay) + "lr" + str(self.alpha) + "t" \
                   + str(self.target_update) + "b" + str(self.batch_size) + "d" + str(self.dropout_ratio) + "tau" + str(self.tau)
        self.memory = ReplayMemory(5000, self.batch_size)
        self.env = gym.make("LunarLander-v2")
        self.n_actions = self.env.action_space.n
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.policy_net = DQN(self.dropout_ratio)
        self.target_net = DQN(self.dropout_ratio)
        self.policy_net = self.policy_net.float()
        self.target_net = self.target_net.float()
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(),
                                          lr=self.alpha)
        self.loss = MSELoss()
Example #27
    def __init__(
        self,
        model_thunk,
        experience_replay,
        lr,
        gamma,
        learning_start,
        steps_per_train,
        batch_size,
        max_timesteps,
        final_eps,
        exploration_fraction,
        steps_per_target_update,
        adam_eps,
    ):
        self.online_network = model_thunk()
        self.online_network.cuda()
        self.target_network = model_thunk()
        self.target_network.cuda()
        self.target_network.load_state_dict(self.online_network.state_dict())
        self.experience_replay = experience_replay
        self.eps = 1.0
        self.gamma = gamma
        self.total_steps = 0

        self.opt = optim.Adam(self.online_network.parameters(),
                              lr,
                              eps=adam_eps)
        self.criterion = MSELoss()
        self.learning_start = learning_start
        self.steps_per_train = steps_per_train
        self.batch_size = batch_size
        self.max_timesteps = max_timesteps
        self.final_eps = final_eps
        self.exploration_fraction = exploration_fraction
        self.steps_per_target_update = steps_per_target_update
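
The constructor stores final_eps and exploration_fraction but no annealing code is shown; a typical linear schedule as a hedged sketch (a hypothetical helper, not the source's method):

    def _anneal_eps(self):
        # Linearly anneal epsilon from 1.0 to final_eps over the first
        # exploration_fraction * max_timesteps environment steps.
        frac = min(1.0, self.total_steps / (self.exploration_fraction * self.max_timesteps))
        self.eps = 1.0 + frac * (self.final_eps - 1.0)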
Example #28
valid_sampler = SubsetRandomSampler(val_indices)

train_loader = DataLoader(dataset,
                          batch_size=256,
                          num_workers=0,
                          sampler=train_sampler)
valid_loader = DataLoader(dataset,
                          batch_size=256,
                          num_workers=0,
                          sampler=valid_sampler)

model = LSTMModel(no_outputs=1)
model = model.cuda()
optimizer = optim.RMSprop(model.parameters(), lr=1e-3)
scheduler = StepLR(optimizer, 1, 0.999)
criterion = MSELoss()
criterion = criterion.cuda()

print_each = 20


def train(epoch):
    running_loss = 0.0
    model.train()
    for i, data in enumerate(train_loader):
        inputs = data['images'].cuda()
        targets = data['commands'].cuda()
        optimizer.zero_grad()

        weights_a, weights_b, weights_s = find_weights(targets[:, 0], bins)
Example #29
import pytest
import torch.nn as nn
from torch.nn.modules.loss import MSELoss
from torch.optim.adam import Adam
from ..pipeline import Pipeline
from ..learner import LearningAlgorithm


# noinspection PyAbstractClass
class DummyNet(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.fc = nn.Linear(2, 2)


net = DummyNet()
learner = LearningAlgorithm(MSELoss(), Adam(net.parameters()))


class TestPipeline:
    def test_constructor_rejects_invalid_elements(self) -> None:
        # Invalid pre-processor(s)
        with pytest.raises(AssertionError):
            # noinspection PyTypeChecker
            Pipeline(1, net, learner, None)
        # Invalid net
        with pytest.raises(AssertionError):
            # noinspection PyTypeChecker
            Pipeline(None, 1, learner, None)
        with pytest.raises(AssertionError):
            # noinspection PyTypeChecker
            Pipeline(None, None, learner, None)
Example #30
    parser.add_argument('--reduce_lr_patience',
                        required=False,
                        type=int,
                        default=3)
    parser.add_argument('--denoising_factor',
                        required=False,
                        type=float,
                        default=0.5)
    parser.add_argument('--epochs_no_improve',
                        required=False,
                        type=float,
                        default=5)
    parser.add_argument('--loss',
                        required=False,
                        type=_Loss,
                        default=MSELoss(reduction='mean'))
    parser.add_argument('--auto_encoder_embedding',
                        required=False,
                        default='tmp/reversal_auto_encoder_5000_500.pt')
    parser.add_argument('--load_attnn_model',
                        required=False,
                        type=bool,
                        default=False)
    parser.add_argument('--pseudo_label_iterations',
                        required=False,
                        type=int,
                        default=10)

    # Models parameters
    parser.add_argument('--autoencoder_shape',
                        required=False,