def __init__(self, X, y, features, samples_id, depth, max_depth, lambdap=0.01, gamma=0.01, loss='mse'):
    self.lambdap = lambdap
    self.gamma = gamma
    self.loss_metric = loss
    # resolve the loss object from its string name ('mse' or 'log')
    if loss == 'mse':
        self.loss = MSE()
    elif loss == 'log':
        self.loss = LogLoss()
    super().__init__(X, y, features, samples_id, depth, max_depth)
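The MSE() and LogLoss() objects above come from the surrounding project. For reference, a minimal MSE loss object with the kind of interface these snippets rely on might look like the sketch below (the class name matches, but the method names and signatures are illustrative assumptions, not the project's actual API).

import numpy as np

class MSE:
    """Illustrative mean-squared-error loss: value and gradient w.r.t. the predictions."""

    def loss(self, y_true, y_pred):
        return np.mean((y_true - y_pred) ** 2)

    def gradient(self, y_true, y_pred):
        # derivative of mean((y_true - y_pred)^2) with respect to y_pred
        return 2.0 * (y_pred - y_true) / y_true.size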
Example #2
def build_model():
    model = Sequential(MSE(), input_size=2)
    model.add_layer(Linear(2, 25))
    model.add_layer(ReLU(25))
    model.add_layer(Linear(25, 25))
    model.add_layer(ReLU(25))
    model.add_layer(Linear(25, 25))
    model.add_layer(Tanh(25))
    model.add_layer(Linear(25, 2))
    return model
Example #3
    def evaluate(self, X, Y):

        if X.ndim == 1:
            X = X.reshape(len(X), 1)

        # append a bias column so the intercept is part of the weight vector
        bias = np.ones(len(X)).reshape(len(X), 1)
        X = np.concatenate((X, bias), axis=1)
        Y_pred = X.dot(self.Weights)

        loss = MSE(Y, Y_pred)

        print("Test MSE loss : ", loss)
Example #4
    def compile(self, loss='mse', lr=0.001):
        # accept either a known loss name or a Loss instance
        if loss not in LOSS_LIST and not isinstance(loss, Loss):
            raise ValueError("Invalid loss")

        if isinstance(loss, str):
            if loss == 'mse':
                self.loss = MSE()
        else:
            self.loss = loss

        self.lr = lr
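Typical calls to this compile method, inferred from the checks above (the string form assumes 'mse' is in LOSS_LIST; `model` stands in for whatever object defines compile):

model.compile(loss='mse', lr=0.01)   # string shorthand resolved to MSE()
model.compile(loss=MSE(), lr=0.01)   # or pass a Loss instance directly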
Example #5
    def train(self):
        dataset = self.dataset(self.args.data,
                               self.args.gt,
                               self.args.val_size,
                               crop_div=8)
        out_dir = self._create_out_dir()

        train_dataset = dataset.get_dataset(data_type='train',
                                            batch_size=self.args.batch,
                                            repeat_count=None,
                                            crop_params=('random',
                                                         self.args.crop))

        valid_dataset = dataset.get_dataset(data_type='valid',
                                            batch_size=1,
                                            repeat_count=None,
                                            crop_params=('center',
                                                         self.args.crop))

        optimizer = tf.keras.optimizers.Adam(self.args.lr, beta_1=.5)
        self.model.compile(loss=MSE(),
                           optimizer=optimizer,
                           metrics=[PSNR(), SSIM()])

        early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_PSNR',
                                                      patience=10,
                                                      mode='max')
        csv_log = tf.keras.callbacks.CSVLogger(
            os.path.join(out_dir, f'train_{self.args.model}.log'))

        checkpoints_path = os.path.join(out_dir, self.args.model)
        saver = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoints_path + '_{epoch:03d}-{val_PSNR:.2f}.h5',
            monitor='val_PSNR')

        callbacks = [csv_log, saver]
        if not self.args.no_early_stop:
            callbacks.append(early_stop)

        self.model.fit(train_dataset,
                       epochs=self.args.epochs,
                       callbacks=callbacks,
                       validation_data=valid_dataset,
                       verbose=1,
                       validation_steps=1,
                       steps_per_epoch=dataset.numel * self.args.repeat //
                       self.args.batch)
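The PSNR() and SSIM() metrics passed to compile above are project-specific classes. tf.keras also accepts plain functions as metrics, so a minimal PSNR stand-in built on tf.image could look like this (assumes pixel values scaled to [0, 1]; the project's own classes may differ):

import tensorflow as tf

def PSNR(y_true, y_pred):
    # peak signal-to-noise ratio per image, averaged by Keras across the batch
    return tf.image.psnr(y_true, y_pred, max_val=1.0)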
Example #6
def __init__(self,
             n_iters=10000,
             hidden_activation=Sigmoid(),
             output_activation=Linear(),
             learning_rate=1e-2,
             n_hidden=10,
             loss=MSE(),
             mini_batch=10):
    self.n_iters = n_iters
    self.hidden_activation = hidden_activation
    self.output_activation = output_activation
    self.learning_rate = learning_rate
    self.W = None
    self.W0 = None
    self.V = None
    self.V0 = None
    self.mini_batch = mini_batch
    self.loss = loss
    self.X, self.y = None, None
    self.n_hidden = n_hidden
Example #7
    def fit(self, X_train, Y_train, showfig=False):

        self.X = X_train.astype(dtype=np.float32)
        self.Y = Y_train

        # single feature plus a bias column
        self.X = self.X.reshape(len(self.X), 1)
        bias = np.ones(len(self.X)).reshape(len(self.X), 1)
        self.X = np.concatenate((self.X, bias), axis=1)

        # closed-form least squares: w = (X^T X)^(-1) X^T y
        self.Weights = np.linalg.inv(self.X.T.dot(self.X)).dot(
            self.X.T.dot(self.Y))

        Y_pred = self.X.dot(self.Weights)

        loss = MSE(self.Y, Y_pred)
        print("MSE loss : ", loss)

        if showfig:
            plt.scatter(self.X[:, :-1], self.Y, c='b')
            plt.plot(self.X[:, :-1], Y_pred, c='r')
            plt.show()
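The fit above solves the normal equation w = (XᵀX)⁻¹ Xᵀ y in closed form. A self-contained check of the same computation on synthetic data (independent of the class this method belongs to):

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, size=100)
y = 3.0 * x + 0.5 + rng.normal(scale=0.1, size=100)

# design matrix with a bias column, as in fit()
X = np.column_stack([x, np.ones_like(x)])
w = np.linalg.inv(X.T @ X) @ (X.T @ y)
y_pred = X @ w

print("weights (slope, intercept):", w)
print("MSE:", np.mean((y - y_pred) ** 2))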
Example #8
    output_layer = None
    if type_of_assigment == "regression":
        p_train = RegressionProvider(train_df,
                                     batch_size=json_parser.batch_size)
        p_test = RegressionProvider(test_df, batch_size=json_parser.batch_size)
        output_layer = "linear"
    elif type_of_assigment == "classification":
        p_train = ClassifierProvider(train_df,
                                     batch_size=json_parser.batch_size)
        p_test = ClassifierProvider(test_df, batch_size=json_parser.batch_size)
        output_layer = "sigmoid"
    hidden = json_parser.layers_size
    act = json_parser.layers_activations
    act.append(output_layer)
    seed = json_parser.seed
    loss = MSE()
    number_of_iterations = json_parser.number_of_iterations

    nn = NeuralNet(inputs=p_train.number_of_inputs,
                   hidden=hidden,
                   outputs=p_train.number_of_outputs,
                   activations=act,
                   loss=loss,
                   seed=seed)

    summary = train(nn, number_of_iterations, p_train, p_test, print_weights)
    summary.show()
    visualization = VisualizationProvider(test_df, nn, type_of_assigment)
    x = visualization.show()
    visualization.show_map()
    print("Accuracy: {}".format(x))
Example #9
    W1 = np.array([[0.15, 0.20], [0.25, 0.30]])
    b1 = 0.35

    W2 = np.array([[0.4, 0.45], [0.50, 0.55]])
    b2 = 0.60

    y_true = np.array([[0.01, 0.99]])

    dense = Dense(2, W1, b1)
    sigmoid = Activations('Sigmoid')
    swish1 = Activations('Swish')
    dense2 = Dense(2, W2, b2)
    swish2 = Activations('Swish')
    activation2 = Activations('Sigmoid')

    loss_fn = MSE()

    # forward pass through the sigmoid path: Dense -> Sigmoid -> Dense -> Sigmoid
    z1 = dense.forward(x)
    sig1 = sigmoid.forward(z1)
    z2 = dense2.forward(sig1)
    y_pred = activation2.forward(z2)

    # forward pass through the Swish path: Dense -> Swish -> Dense -> Swish
    sw1 = swish1.forward(z1)
    sw2 = dense2.forward(sw1)
    y_pred_swish = swish2.forward(sw2)

    # loss = loss_fn.loss(y_true, y_pred)
    # print("loss: ", loss)
    # print("loss's mean: ", np.mean(loss))

    # swish_loss = loss_fn.loss(y_true, y_pred_swish)
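The 'Swish' activation used in the second path is swish(x) = x · sigmoid(x). A minimal NumPy version, in case the Activations wrapper is unfamiliar (illustrative, not the wrapper's own implementation):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def swish(x):
    # swish(x) = x * sigmoid(x)
    return x * sigmoid(x)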
Example #10
def quantization():
    if args.quant_mode != 'test' and args.deploy:
        args.deploy = False
        warnings.warn(
            'Exporting an xmodel requires quantization test mode; deploy is disabled for this run.',
            UserWarning)

    if args.quant_mode == 'test' and (args.batch_size != 1
                                      or args.subset_len != 1):
        warnings.warn(
            'Exporting an xmodel requires batch size 1 and a single inference iteration; both will be adjusted automatically.',
            UserWarning)
        args.batch_size = 1
        args.subset_len = 1

    p = Path(args.checkpoint_dir) / args.model_name
    model = FFN(args.input_size)
    model = preprocessors.load_from_state_dict(model, p)

    if args.quant_mode == 'float':
        quant_model = deepcopy(model)
    else:
        rand_input = torch.randn([args.batch_size, args.input_size])
        quantizer = torch_quantizer(args.quant_mode,
                                    module=deepcopy(model),
                                    input_args=rand_input,
                                    bitwidth=8,
                                    mix_bit=False,
                                    qat_proc=False,
                                    device=set_seed.DEVICE)

        quant_model = quantizer.quant_model

    if args.fast_finetune:
        ft_loader = preprocessors.make_dataloader(data_dir=args.data_dir,
                                                  data_file=args.calib_data,
                                                  subset_len=args.subset_len)
        if args.quant_mode == 'calib':
            loss_fn = MSE().to(set_seed.DEVICE)
            quantizer.fast_finetune(eval_loss,
                                    (quant_model, ft_loader, loss_fn))
        elif args.quant_mode == 'test':
            quantizer.load_ft_param()

    if args.evaluate:
        valid_loader = preprocessors.make_dataloader(
            data_dir=args.data_dir,
            data_file=args.calib_data,
            batch_size=args.batch_size)
        cr1 = CustomRunner(model=model,
                           device=set_seed.DEVICE,
                           input_key='features',
                           input_target_key='targets',
                           evaluate=True,
                           loaders={'test': valid_loader})
        print('Evaluation completed!')
        print('Initial model results:')
        pprint.pprint(cr1.logs, width=5)

        if args.quant_mode != 'float':
            cr2 = CustomRunner(model=quant_model,
                               device=set_seed.DEVICE,
                               input_key='features',
                               input_target_key='targets',
                               evaluate=True,
                               loaders={'test': valid_loader})
            print('Quantized model results:')
            pprint.pprint(cr2.logs, width=5)

    if args.quant_mode == 'calib':
        quantizer.export_quant_config()
    if args.deploy:
        quantizer.export_xmodel(deploy_check=True)
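quantizer.fast_finetune above receives an evaluation callable plus its arguments; eval_loss itself is not shown in this excerpt. A plausible shape for it, matching the (quant_model, ft_loader, loss_fn) tuple passed in, is sketched below (the loop assumes the dataloader yields (features, targets) pairs; that and the function body are assumptions, not the project's actual code):

import torch

def eval_loss(model, loader, loss_fn):
    # average loss of `model` over `loader`; used by the quantizer for calibration
    model.eval()
    total, batches = 0.0, 0
    with torch.no_grad():
        for features, targets in loader:
            preds = model(features.to(set_seed.DEVICE))
            total += loss_fn(preds, targets.to(set_seed.DEVICE)).item()
            batches += 1
    return total / max(batches, 1)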
Example #11
def train_model(model,
                lr,
                batch_size,
                momentum,
                epochs,
                train_input,
                train_target,
                test_input,
                test_target,
                loss_log=False):
    losses = []
    validation_input = test_input
    validation_target = test_target
    validation_loss = []
    train_loss = []
    validation_acc = []
    train_acc = []
    optimizer = SGD(model.layers, learning_rate=lr, momentum=momentum)
    criterion = MSE()

    for epoch in range(epochs):
        acc_loss = 0

        for b in range(0, train_input.size(0), batch_size):
            if b + batch_size > train_input.size(0):
                mini_batch_size = train_input.size(0) - b
            else:
                mini_batch_size = batch_size
            # model forward pass
            pred = model.forward(train_input.narrow(0, b, mini_batch_size))
            # compute the loss on this mini-batch
            loss = criterion.compute_loss(
                pred, train_target.narrow(0, b, mini_batch_size))
            acc_loss += loss.item()
            # gradient of the loss w.r.t. the model output
            grad_wrt_outputs = criterion.backward()
            # backpropagate the loss gradient through the model
            model.backward(grad_wrt_outputs)
            # update the model parameters
            optimizer.step()
            # reset gradient accumulators to zero
            model.zero_grad()

        # number of mini-batches per epoch (ceiling division by the nominal batch size)
        batches = (train_input.size(0) + batch_size - 1) // batch_size
        # track loss and accuracy after each epoch
        if loss_log:
            print(epoch, acc_loss / batches)
            pred_train = model.forward(train_input)
            pred_test = model.forward(test_input)
            train_loss.append(MSE().compute_loss(pred_train,
                                                 train_target).item())
            validation_loss.append(MSE().compute_loss(pred_test,
                                                      test_target).item())
            train_acc.append(computeAccuracy(model, train_input, train_target))
            validation_acc.append(
                computeAccuracy(model, test_input, test_target))
            model.zero_grad()
    return train_loss, validation_loss, train_acc, validation_acc
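computeAccuracy is not defined in this excerpt. For a network trained with MSE on one-hot (two-column) targets, as in this example, a plausible version is the sketch below (the target encoding is an assumption, and this is not the original helper):

def computeAccuracy(model, inputs, targets):
    # fraction of samples whose predicted class (argmax) matches the target class
    preds = model.forward(inputs)
    return (preds.argmax(dim=1) == targets.argmax(dim=1)).float().mean().item()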
Example #12
    W2 = np.array([[.40, .45], [.50, .55]])

    b1 = .35
    b2 = 0.60

    y_true = np.array([[.01, .99]])

    #Layers Generation
    dense = Dense(2, W1, b1)
    dense2 = Dense(2, W2, b2)

    activation1 = Sigmoid()
    # activation2=Sigmoid()
    activation2 = Activation("sigmoid")

    loss_func = MSE()

    #Forward Pass
    # Dense -> Activation -> Dense -> Activation -> y_pred

    z1 = dense.forward(x)
    a1 = activation1.forward(z1)
    print("Activation Value:", a1)

    z2 = dense2.forward(a1)
    a2 = activation2.forward(z2)
    y_pred = a2

    loss = loss_func.loss(y_true, y_pred)

    print("Individual Loss:", loss)
Example #13

if __name__ == '__main__':
    fcnn = FCNN()
    times = torch.linspace(-5, 5, 100)
    training_times = torch.linspace(-5, 5, 100).unsqueeze(-1)
    print(training_times.shape)
    training_values = g(training_times, True)
    values = g(times)
    optimiser = AdamHD(fcnn.parameters(), alpha_lr=1e-8)
    # optimiser = Adam(fcnn.parameters())
    epochs = 500

    for _ in range(epochs):
        predictions = fcnn(training_times)
        loss = MSE(training_values, predictions)
        loss.backward()
        optimiser.step()
        optimiser.zero_grad()
        print(f"MSE : {loss}")

    plotting = True

    if plotting:
        plt.figure()
        with torch.no_grad():
            for i in range(5):
                predictions = fcnn(torch.unsqueeze(times, -1),
                                   training=True).squeeze()
                if i == 0:
                    plt.plot(times,