def train_autoencoder_deep_fashion():
    """Trains the autoencoder for DeepFashion."""
    print("=============================================================")
    print("================ Train AE with DeepFashion ==================")
    print("=============================================================\n")

    encoder = CifarNet(input_channels=3, num_classes=50)
    encoder = encoder.to(DEVICE)

    decoder = Decoder(input_channels=64, num_classes=50, out_channels=3)
    decoder = decoder.to(DEVICE)

    parameters = list(encoder.parameters()) + list(decoder.parameters())
    loss_fn = nn.MSELoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(encoder, decoder, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion)
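The train helper above is defined elsewhere in the project. A minimal sketch of what such a reconstruction loop might look like, assuming the loader yields (images, labels) batches, that the encoder returns the 64-channel feature map the Decoder expects, and that DEVICE is as above; the real train may differ:

def train(encoder, decoder, loss_fn, optimizer, scheduler, epochs, loader):
    """Hypothetical reconstruction loop for the autoencoder."""
    for epoch in range(epochs):
        encoder.train()
        decoder.train()
        running_loss = 0.0
        for images, _ in loader:  # labels are unused for reconstruction
            images = images.to(DEVICE)
            optimizer.zero_grad()
            reconstruction = decoder(encoder(images))
            loss = loss_fn(reconstruction, images)  # pixel-wise MSE
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()
        print("epoch %d: train loss %.4f" % (epoch, running_loss / len(loader)))
    return encoder, decoder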
Example #2
def load(self) -> None:
    self.model = CifarNet()
    self.optimizer = SGD(self.model.parameters(), lr=0.001, momentum=0.9)
    self.lr_scheduler = MultiStepLR(self.optimizer, [10, 30, 60, 90])
    self.criterion = nn.CrossEntropyLoss()
    # Load dataset
    sample_path = os.path.join(self.sample_dir,
                               "sample-%s.csv" % self.task_id)
    df = pd.read_csv(sample_path, header=None)
    data = df.values.tolist()
    self.dataset = CifarDataset(data)
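CifarDataset is not shown on this page. A minimal sketch, assuming each CSV row stores a flattened 3x32x32 image followed by an integer class label (the real class may differ):

import torch
from torch.utils.data import Dataset


class CifarDataset(Dataset):
    """Hypothetical CSV-backed dataset: row = 3072 pixel values + label."""

    def __init__(self, rows):
        self.rows = rows

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, idx):
        *pixels, label = self.rows[idx]
        image = torch.tensor(pixels, dtype=torch.float32).view(3, 32, 32)
        return image / 255.0, int(label)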
Example #3
    def test(self, device) -> dict:
        """Evaluates the aggregated checkpoint on each of the 10 sample
        CSV splits and reports per-split and overall accuracy."""
        ret = {}
        model = CifarNet()
        criterion = nn.CrossEntropyLoss()

        trainer = SupervisedTrainer(model, None, criterion, device=device)

        all_correct, all_total = 0, 0

        for i in range(10):
            sample_path = os.path.join(self.sample_dir, "sample-%d.csv" % i)
            df = pd.read_csv(sample_path, header=None)
            data = df.values.tolist()
            dataset = CifarDataset(data)
            loss, correct, total = trainer.test("aggregate.pth", dataset)

            ret[str(i)] = "%.2f%%(%d/%d)" % (100 * correct / total, correct,
                                             total)

            all_correct += correct
            all_total += total

        ret["ALL"] = "%.2f%%(%d/%d)" % (100 * all_correct / all_total,
                                        all_correct, all_total)

        self.set_item("acc", all_correct / all_total)

        return ret
Example #4
def train_exemplar_cnn_deep_fashion():
    """Trains the exemplar cnn model."""
    print("============================================================")
    print("============ Train ExemplarCNN with DeepFashion ============")
    print("============================================================\n")

    # number of predicted classes = number of training images
    model = CifarNet(input_channels=3,
                     num_classes=len(train_loader_deep_fashion.dataset))
    model = model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    parameters = model.parameters()
    optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(model, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion)
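Exemplar-CNN treats every training image as its own class and learns to recognize augmented copies of it, which is why num_classes equals the dataset size. A minimal sketch of such a wrapper dataset, assuming the base dataset yields (image, label) pairs; the transform choices are illustrative:

import torchvision.transforms as T
from torch.utils.data import Dataset


class ExemplarDataset(Dataset):
    """Hypothetical wrapper: each sample's label is its own index."""

    def __init__(self, base_dataset):
        self.base = base_dataset
        self.augment = T.Compose([
            T.RandomResizedCrop(32, scale=(0.7, 1.0)),
            T.RandomHorizontalFlip(),
            T.ColorJitter(0.4, 0.4, 0.4),
        ])

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        image, _ = self.base[idx]        # the original label is discarded
        return self.augment(image), idx  # the image index is the class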
Example #5
def train_rotation_net_deep_fashion():
    """Trains the rotation model."""
    print("============================================================")
    print("========== Train Rotation Model with DeepFashion ===========")
    print("============================================================\n")

    model = CifarNet(input_channels=3, num_classes=4)
    model = model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(model, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion, val_loader_deep_fashion)
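The rotation pretext task rotates each image by 0, 90, 180, or 270 degrees and trains the network to predict which rotation was applied, hence num_classes=4. A minimal sketch of the label generation, assuming NCHW image tensors:

import torch


def rotate_batch(images):
    """Hypothetical helper: 4 rotated copies per image, labeled 0-3."""
    rotated, labels = [], []
    for k in range(4):
        # rotate by k * 90 degrees in the spatial (H, W) plane
        rotated.append(torch.rot90(images, k, dims=(2, 3)))
        labels.append(torch.full((images.size(0),), k, dtype=torch.long))
    return torch.cat(rotated), torch.cat(labels)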
Example #6
def train_supervised_deep_fashion():
    """Trains the supervised model."""
    print("============================================================")
    print("============= Supervised Training DeepFashion ==============")
    print("============================================================\n")

    df_supervised_model = CifarNet(input_channels=3, num_classes=50)
    df_supervised_model = df_supervised_model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(df_supervised_model.parameters(),
                                 lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return fine_tune(df_supervised_model, loss_fn, optimizer, scheduler,
                     EPOCHS, train_loader_deep_fashion,
                     val_loader_deep_fashion)
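fine_tune is likewise defined elsewhere. A plausible sketch, assuming it mirrors train but also evaluates on the validation loader after every epoch (the loop structure and return value are assumptions):

def fine_tune(model, loss_fn, optimizer, scheduler, epochs,
              train_loader, val_loader):
    """Hypothetical train/validate loop; the real fine_tune may differ."""
    for epoch in range(epochs):
        model.train()
        for images, labels in train_loader:
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            optimizer.zero_grad()
            loss = loss_fn(model(images), labels)
            loss.backward()
            optimizer.step()
        scheduler.step()

        model.eval()
        correct = total = 0
        with torch.no_grad():
            for images, labels in val_loader:
                images, labels = images.to(DEVICE), labels.to(DEVICE)
                preds = model(images).argmax(dim=1)
                correct += (preds == labels).sum().item()
                total += labels.size(0)
        print("epoch %d: val acc %.2f%%" % (epoch, 100.0 * correct / total))
    return model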
Example #7
class TrainTask(Task):
    def __init__(self, task_id, sample_dir: str, aggregate_dir: str = None):
        super(TrainTask, self).__init__(task_id=str(task_id))
        self.sample_dir = sample_dir
        self.aggregate_dir = aggregate_dir

    def load(self) -> None:
        self.model = CifarNet()
        self.optimizer = SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        self.lr_scheduler = MultiStepLR(self.optimizer, [10, 30, 60, 90])
        self.criterion = nn.CrossEntropyLoss()
        # Load dataset
        sample_path = os.path.join(self.sample_dir,
                                   "sample-%s.csv" % self.task_id)
        df = pd.read_csv(sample_path, header=None)
        data = df.values.tolist()
        self.dataset = CifarDataset(data)

    def train(self, device: str) -> dict:
        if self.aggregate_dir is not None:
            pre_model_path = os.path.join(self.aggregate_dir, "aggregate.pth")
        else:
            pre_model_path = None
        self.trainer = SupervisedTrainer(self.model,
                                         self.optimizer,
                                         self.criterion,
                                         self.lr_scheduler,
                                         epoch=10,
                                         device=device,
                                         init_model_path=pre_model_path,
                                         console_out="console.out")

        _, correct, total = self.trainer.test(pre_model_path, self.dataset)
        ret = {
            "init_acc":
            "%.2f%%(%d/%d)" % (100 * correct / total, correct, total)
        }

        self.trainer.mount_dataset(self.dataset)

        ret.update(self.trainer.train())
        return ret
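SupervisedTrainer and the step that writes aggregate.pth are not shown on this page. A minimal FedAvg-style sketch of how the per-task checkpoints could be averaged into a single state dict (the file layout and uniform weighting are assumptions):

import torch


def aggregate(checkpoint_paths, out_path="aggregate.pth"):
    """Hypothetical uniform averaging of CifarNet state dicts."""
    states = [torch.load(p, map_location="cpu") for p in checkpoint_paths]
    averaged = {}
    for key in states[0]:
        stacked = torch.stack([s[key].float() for s in states])
        # average, then restore the original dtype (e.g. integer buffers)
        averaged[key] = stacked.mean(dim=0).to(states[0][key].dtype)
    torch.save(averaged, out_path)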
    "*****************************************************************************************"
)
print("Dev set size: {}".format(dataset.dev.size))
print(
    "*****************************************************************************************"
)
print("Test set size: {}".format(dataset.test.size))
print(
    "========================================================================================="
)
print("Show first 3 training images")
# for i in range(3):
#     plt.imshow(dataset.train.data['images'][i])
#     plt.show()

cifar_net = CifarNet(dataset, args)

cifar_net.train()

predicted = []
for probs in cifar_net.predict(dataset.test.data["images"],
                               batch_size=args.batch_size):
    pred = np.argmax(probs)
    predicted.append(pred)

accuracy_test = accuracy_score(dataset.test.data["labels"], predicted)
# unpacking ravel() into four values assumes a binary (two-class) label set
TN, FP, FN, TP = confusion_matrix(dataset.test.data["labels"],
                                  predicted).ravel()

_test_acc = (TP + TN) / (FP + TN + FN + TP)
print("_test_acc: {}".format(_test_acc))