def main():
    train_dataset = SimpleDataset(total_num=1000,
                                  is_confused=True,
                                  x=3,
                                  y=5,
                                  seed=1)
    test_dataset = SimpleDataset(total_num=300,
                                 is_confused=False,
                                 x=3,
                                 y=5,
                                 seed=2)

    model = PassiveAggressive()

    # Online training: show the labelled examples to the model one at a time.
    for i in range(len(train_dataset.y)):
        model.fit(train_dataset.feature_vec[i], train_dataset.y[i])

    print(model.w)                                    # learned weight vector
    print(test_dataset.valid_training_result(model))  # accuracy on the test set
    train_dataset.show_result(model.w)                # plot the learned boundary
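The PassiveAggressive model above is only exercised through fit(feature_vec, y) and its weight vector w. For reference, a minimal sketch of the classic Passive-Aggressive update (hinge loss with a closed-form step size) matching that interface might look like the following; the class name, the 3-dimensional default, and the +1/-1 label convention are assumptions, not the project's actual implementation.

import numpy as np


class PassiveAggressiveSketch:
    """Minimal online Passive-Aggressive binary classifier (hypothetical).

    Assumes feature vectors are 1-D arrays (apparently [x1, x2, 1] here,
    since the boundary is later read off as w = (a, b, c)) and labels are +1/-1.
    """

    def __init__(self, dim=3):
        self.w = np.zeros(dim)  # weight vector, exposed like model.w

    def fit(self, x, y):
        x = np.asarray(x, dtype=float)
        loss = max(0.0, 1.0 - y * np.dot(self.w, x))  # hinge loss
        if loss > 0.0:
            tau = loss / np.dot(x, x)                 # uncapped PA step size
            self.w = self.w + tau * y * x

    def predict(self, x):
        return 1 if np.dot(self.w, np.asarray(x, dtype=float)) >= 0 else -1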
Example no. 2
    random.seed(args.seed)

    # Residual U-Net for 2-class segmentation of 128x128 single-channel images.
    model = ResUNet(input_shape=(128, 128, 1),
                    classes=2,
                    filters_root=16,
                    depth=3)
    model.summary()

    if args.plot_model:
        from tensorflow.python.keras.utils.vis_utils import plot_model
        plot_model(model, show_shapes=True)

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["categorical_accuracy"])

    # Transpose the stream of (image, mask) pairs into two parallel tuples,
    # then stack them into the input and target batch arrays.
    train_dataset = list(
        zip(*list(SimpleDataset(args.train_dataset_dir_path)())))
    x = np.array(train_dataset[0])
    y = np.array(train_dataset[1])

    validation_dataset = list(
        zip(*list(SimpleDataset(args.validation_dataset_dir_path)())))
    validation_dataset = (np.array(validation_dataset[0]),
                          np.array(validation_dataset[1]))
    model.fit(x=x,
              y=y,
              validation_data=validation_dataset,
              epochs=args.epochs,
              batch_size=args.batch_size)
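The zip(*...) idiom above is what turns the stream of (image, mask) pairs produced by SimpleDataset(dir_path)() into the two batch arrays Keras expects. A tiny self-contained illustration with dummy arrays; the 128x128x1 image shape and the 2-channel one-hot mask shape are assumptions read off input_shape and classes above.

import numpy as np

# Four dummy (image, mask) pairs standing in for SimpleDataset(...)().
pairs = [(np.zeros((128, 128, 1)), np.zeros((128, 128, 2))) for _ in range(4)]

images, masks = zip(*pairs)  # transpose: pairs -> (all images, all masks)
x = np.array(images)         # shape (4, 128, 128, 1)
y = np.array(masks)          # shape (4, 128, 128, 2)
print(x.shape, y.shape)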
Example no. 3
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader

from simple_dataset import SimpleDataset
from simpleOptimizer import SimpleOptimizer


class Simple(nn.Module):
    def __init__(self, indim, outdim):
        # For the time being, let's work with 2 activations: relu and identity
        super(Simple, self).__init__()
        self.applyW = nn.Linear(indim, outdim)

    def forward(self, x):
        out = self.applyW(x)
        return out


myDb = SimpleDataset()
dataLoader = DataLoader(myDb, batch_size=3, shuffle=True, num_workers=0)

simple = Simple(2, 1)
criterion = nn.MSELoss()
optimizer = optim.SGD(simple.parameters(), lr=0.001)

NUMBER_OF_EPOCHS = 20

for _ in range(NUMBER_OF_EPOCHS):

    for (example_num, data) in enumerate(dataLoader):
        optimizer.zero_grad()
        inputs = Variable(data['example'].float())
        targets = Variable(data['target'].float())
        outputs = simple(inputs)             # forward pass through the linear layer
        loss = criterion(outputs, targets)   # mean squared error
        loss.backward()                      # backpropagate
        optimizer.step()                     # SGD update
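simple_dataset.SimpleDataset is not shown here; the loop above only needs a map-style dataset whose items are dicts with an 'example' entry (two features, matching Simple(2, 1)) and a 'target' entry (one value). A hypothetical stand-in with a toy linear target, purely so the snippet can be run end to end:

import torch
from torch.utils.data import Dataset


class SimpleDatasetSketch(Dataset):
    """Hypothetical map-style dataset matching the dict access above."""

    def __init__(self, num_samples=30):
        self.examples = torch.randn(num_samples, 2)
        # target = x1 + 2*x2, just to give the regression something to learn
        self.targets = (self.examples[:, 0] + 2 * self.examples[:, 1]).unsqueeze(1)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        return {'example': self.examples[idx], 'target': self.targets[idx]}

Swapping it in only changes one line: dataLoader = DataLoader(SimpleDatasetSketch(), batch_size=3, shuffle=True, num_workers=0).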
Example no. 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--record', type=int, default=0)  # 1 = save frames and a movie
    args = parser.parse_args()
    is_recorded = args.record

    train_dataset = SimpleDataset(total_num=250,
                                  is_confused=True,
                                  x=3,
                                  y=5,
                                  seed=1)
    test_dataset = SimpleDataset(total_num=100,
                                 is_confused=False,
                                 x=3,
                                 y=5,
                                 seed=2)

    model_PA = PassiveAggressive()
    model_PA_one = PassiveAggressiveOne(0.05)

    # Two panels: the left shows the data and the decision boundaries,
    # the right shows test accuracy as training progresses.
    fig = plt.figure(figsize=(20, 4))
    gs = gridspec.GridSpec(1, 10)
    fig_left = fig.add_subplot(gs[0, :3])
    fig_right = fig.add_subplot(gs[0, 4:])

    fig_left.set_xlim([
        train_dataset.dataset.x1.min() - 0.1,
        train_dataset.dataset.x1.max() + 0.1
    ])
    fig_left.set_ylim([
        train_dataset.dataset.x2.min() - 0.1,
        train_dataset.dataset.x2.max() + 0.1
    ])
    fig_left.set_title("Input Data & Trained Boundary", fontsize=15)
    fig_left.tick_params(labelsize=10)

    fig_right.set_xlim([0, len(train_dataset.y)])
    fig_right.set_ylim([0, 1])
    fig_right.set_title("Test Accuracy", fontsize=15)
    fig_right.tick_params(labelsize=10)
    fig_right.set_xlabel("Number of training data", fontsize=12)
    fig_right.set_ylabel("Accuracy", fontsize=12)

    line_x = np.array(range(-10, 10, 1))
    line_y = line_x * 0
    line_PA, = fig_left.plot(line_x, line_y, c="#2980b9", label="PA")
    line_PA_one, = fig_left.plot(line_x, line_y, c="#e74c3c", label="PA-1")

    fig_left.legend(handles=[line_PA, line_PA_one], fontsize=12)
    fig_right.legend(handles=[line_PA, line_PA_one], fontsize=12)

    valid_result_sample = []
    accuracies_PA = []
    accuracies_PA_one = []
    imgs = []

    for i in range(len(train_dataset.y)):
        # Plot the i-th training point, colored by its label.
        fig_left.scatter(x=train_dataset.dataset.x1[i],
                         y=train_dataset.dataset.x2[i],
                         c=cm.cool(train_dataset.dataset.label[i]),
                         alpha=0.5)

        # Online updates: PA and PA-I each see this single example.
        model_PA.fit(train_dataset.feature_vec[i], train_dataset.y[i])
        model_PA_one.fit(train_dataset.feature_vec[i], train_dataset.y[i])

        accuracies_PA.append(test_dataset.valid_training_result(model_PA))
        accuracies_PA_one.append(
            test_dataset.valid_training_result(model_PA_one))

        # Redraw each decision boundary a*x1 + b*x2 + c = 0 as x2 = (a*x1 + c) / (-b).
        a, b, c = model_PA.w
        line_y = (a * line_x + c) / (-b)
        line_PA.set_data(line_x, line_y)

        a, b, c = model_PA_one.w
        line_y = (a * line_x + c) / (-b)
        line_PA_one.set_data(line_x, line_y)

        fig_right.plot(accuracies_PA, c="#2980b9", label="PA")
        fig_right.plot(accuracies_PA_one, c="#e74c3c", label="PA-1")

        plt.pause(0.005)

        if is_recorded == 1:
            matplotrecorder.save_frame()

    if is_recorded == 1:
        matplotrecorder.save_movie("results.mp4", 0.005)
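PassiveAggressiveOne(0.05) is the PA-I variant: compared with the plain PA update sketched after the first example above, the only difference is that the step size is capped by an aggressiveness parameter C, which keeps single noisy ("confused") examples from swinging the boundary too far. A minimal sketch under the same interface assumptions (class name and +1/-1 labels are hypothetical):

import numpy as np


class PassiveAggressiveOneSketch:
    """Minimal PA-I variant of the Passive-Aggressive update (hypothetical)."""

    def __init__(self, C, dim=3):
        self.C = C              # aggressiveness: upper bound on the step size
        self.w = np.zeros(dim)

    def fit(self, x, y):
        x = np.asarray(x, dtype=float)
        loss = max(0.0, 1.0 - y * np.dot(self.w, x))  # hinge loss
        if loss > 0.0:
            tau = min(self.C, loss / np.dot(x, x))    # PA-I: capped step size
            self.w = self.w + tau * y * x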