Example #1
def train(self):
    dataset = {
        'test': self.process(self.params['test_path']),
        'train': self.process(self.params['train_path'])
    }
    # Split the training data into fixed-size batches (remainder dropped).
    batchset = [
        dataset['train'][step:step + self.params['batch_size']]
        for step in range(
            0, len(dataset['train']) - self.params['batch_size'] + 1,
            self.params['batch_size'])
    ]
    self.net = Classifier(Network())
    self.optim = chainer.optimizers.Adam()
    self.optim.setup(self.net)
    for epoch in range(self.params['epoch']):
        for step, batch in enumerate(batchset):
            batch = np.array(batch, dtype=int)
            data = batch[:, :, 0]
            label = batch[:, :, 1]
            self.net.predictor.reset_state()
            self.net.cleargrads()
            loss, accuracy = self.net(data, label)
            loss.backward()
            self.optim.update()
            print("#{:08d} step(epoch {:02d}) loss={:.8f} accuracy={:.8f}".
                  format(step, epoch, loss.data, accuracy.data))
    self.save()
Example #2
def main(args: Namespace) -> None:

    net = resnet18_num_classes(pretrained=False,
                               num_classes=200,
                               p_drop=0.5,
                               type_net='custom')
    net.cpu()
    classifier = Classifier(net)
    classifier.load(args.path_weights.resolve())

    fig_vis = classifier.classify_and_gradcam_by_path(
        args.path_pic.resolve(), target_class=args.target_class)

    if args.path_save != Path(''):
        fig_vis.savefig(args.path_save.resolve())
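
The main() above reads path_weights, path_pic, target_class, and path_save from the parsed arguments. A minimal sketch of a matching command-line parser follows; the argument names mirror the attributes the snippet uses, while the types, defaults, and help texts are assumptions, not part of the original example.

from argparse import ArgumentParser
from pathlib import Path

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--path_weights', type=Path, required=True,
                        help='weights file loaded by Classifier.load()')
    parser.add_argument('--path_pic', type=Path, required=True,
                        help='image to classify and visualize with Grad-CAM')
    parser.add_argument('--target_class', type=int, default=None,
                        help='class index to explain (assumed optional)')
    parser.add_argument('--path_save', type=Path, default=Path(''),
                        help='where to save the figure; empty path skips saving')
    main(parser.parse_args())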
Example #3
def main(args: Namespace):
    args.freeze = parse_to_dict(args.freeze)
    args.aug_degree = parse_to_dict(args.aug_degree)

    results_path = args.log_dir / str(datetime.now())
    results_path.mkdir(exist_ok=True, parents=True)

    write_args(results_path, vars(args))

    fix_seed(args.seed)

    (train_frame,
     test_frame), labels_num2txt = prepare_dataframes(args.data_root)

    train_set = TinyImagenetDataset(train_frame)
    test_set = TinyImagenetDataset(test_frame)

    model = resnet18_num_classes(pretrained=True,
                                 num_classes=200,
                                 p_drop=args.prob_drop,
                                 type_net=args.arch)

    classifier = Classifier(net=model)

    stopper = Stopper(args.n_wrongs, args.delta_wrongs)

    trainer = Trainer(classifier=classifier,
                      train_set=train_set,
                      test_set=test_set,
                      results_path=results_path,
                      device=args.device,
                      batch_size=args.batch_size,
                      num_workers=args.num_workers,
                      num_visual=args.num_visual,
                      aug_degree=args.aug_degree,
                      lr=args.lr,
                      lr_min=args.lr_min,
                      stopper=stopper,
                      labels_num2txt=labels_num2txt,
                      freeze=args.freeze,
                      weight_decay=args.weight_decay,
                      label_smooth=args.label_smooth,
                      period_cosine=args.period_cosine)

    trainer.train(num_epoch=args.num_epoch)
Example #4
def Mlp(args):
    loader_spec = (args.feature_num, args.class_num)

    if args.mode == 'train':
        train_dataset = Loader(args.train_data_dir,
                               *loader_spec,
                               data_num=args.train_data_num)
        validation_dataset = Loader(args.validation_data_dir,
                                    *loader_spec,
                                    data_num=args.validation_data_num)
    elif args.mode == 'validation':
        train_dataset = None
        validation_dataset = Loader(args.validation_data_dir,
                                    *loader_spec,
                                    data_num=args.validation_data_num)
    else:
        train_dataset = None
        validation_dataset = None

    model = Classifier(train_dataset, validation_dataset, L2Loss(), accuracy,
                       args.epochs, args.learning_rate)

    layer_spec = (sigmoid, d_sigmoid, args.checkpoint_dir)
    model.append_layer(
        Dense(args.feature_num,
              args.hidden_layer_neurons[0],
              *layer_spec,
              scope="input_layer"))
    for i in range(args.hidden_layer_num - 1):
        model.append_layer(
            Dense(args.hidden_layer_neurons[i],
                  args.hidden_layer_neurons[i + 1],
                  *layer_spec,
                  scope="hidden_layer_" + str(i)))
    model.append_layer(
        Dense(args.hidden_layer_neurons[-1],
              args.class_num,
              *layer_spec,
              scope="output_layer"))
    return model
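
For context, a minimal sketch of how Mlp() might be called. The Namespace fields mirror the attributes the function reads; the concrete values (feature counts, directories, layer sizes) are purely illustrative assumptions.

from argparse import Namespace

args = Namespace(
    mode='train',
    feature_num=64,
    class_num=10,
    train_data_dir='data/train',
    validation_data_dir='data/validation',
    train_data_num=10000,
    validation_data_num=2000,
    epochs=50,
    learning_rate=0.01,
    checkpoint_dir='checkpoints',
    hidden_layer_num=2,
    hidden_layer_neurons=[128, 64],
)
# Builds a 64 -> 128 -> 64 -> 10 network and wraps it in Classifier.
model = Mlp(args)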
Example #5
        losses1.append(loss.item())
        if i % 10 == 0:
            print("Loss:", i, losses1[-1])

# if epoch % 10 == 0:
#     pred_cpu["mask"] = torch.clamp(pred_cpu["mask"], min=-0, max=1)
#     plt.imshow(transforms.ToPILImage()(batch["mask"][0]))
#     plt.show()
#     plt.imshow(transforms.ToPILImage()(pred_cpu["mask"][0]))
#     plt.show()

vae.train(False)

# Train Classifier
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classifier = Classifier()

# Use multiple GPUs if available
if torch.cuda.device_count() > 1:
    print("Using ", torch.cuda.device_count(), "GPUs")
    classifier = nn.DataParallel(classifier)
classifier.to(device)

optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)
losses2 = []
valError = []

for epoch in range(6):
    train_iter = iter(train_loader)
    batch = None
    preds = None
Example #6
labels = np.array(
    ['agiri', 'botsu', 'others', 'sonya', 'yasuna', 'yasuna_sonya'])
label_indices = {label: index for index, label in enumerate(labels)}
weight = torch.Tensor([1 / 9, 1 / 2, 1 / 13, 1 / 35, 1 / 68, 1 / 9])

# Prepare the data loaders
train_set = DatasetFromFolder(join('dataset', 'train'), label_indices, 'train')
train_data_loader = DataLoader(dataset=train_set,
                               batch_size=option.batchSize,
                               shuffle=True)
test_set = DatasetFromFolder(join('dataset', 'test'), label_indices, 'test')
test_data_loader = DataLoader(dataset=test_set, batch_size=1)

# Prepare the classifier and the loss function
if option.cuda:
    classifier = Classifier().cuda()
    # Weighted cross-entropy loss
    criterion = nn.CrossEntropyLoss(weight=weight.cuda()).cuda()
else:
    classifier = Classifier()
    # Weighted cross-entropy loss
    criterion = nn.CrossEntropyLoss(weight=weight)

# Prepare the Adam optimizer
optimizer = optim.Adam(classifier.parameters(), lr=option.lr)

# Print the network structure
print(classifier)


def train():
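
The listing is cut off at the train() signature. A minimal sketch of what the body could look like, using only the objects defined above (classifier, criterion, optimizer, train_data_loader, option.cuda); the (image, label) batch layout and the logging interval are assumptions.

def train():
    classifier.train()
    for batch_idx, (image, label) in enumerate(train_data_loader):
        if option.cuda:
            image, label = image.cuda(), label.cuda()
        optimizer.zero_grad()
        output = classifier(image)
        loss = criterion(output, label)  # weighted cross-entropy from above
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('[{}/{}] loss: {:.6f}'.format(
                batch_idx, len(train_data_loader), loss.item()))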
Example #7
def __init__(self, params=None):
    # data/ and label/ reside under dir_path
    self.params = params or {}
    self.map = {'.': 0}
    self.net = Network()
    self.load()
Example #8
class Controller:
    def __init__(self, params=None):
        # data/ and label/ reside under dir_path
        self.params = params or {}
        self.map = {'.': 0}
        self.net = Network()
        self.load()

    def process(self, path):
        data_path = join(path, 'data')
        label_path = join(path, 'label')

        if os.path.exists(join(path, "dataset.pkl")):
            dataset = pickle.load(open(join(path, "dataset.pkl"), "rb"))
            print("read dataset from .pkl")
            return dataset

        dataset = []
        for file_step, filename in enumerate(os.listdir(data_path)):
            file_data = open(data_path + '/' + filename, "r")
            file_label = open(label_path + '/' + filename, "r")
            sentences = file_data.read().split('\n')[:-1]
            labels = file_label.read().split('\n')[:-1]

            for index, (data_in, label_in) in enumerate(zip(sentences,
                                                            labels)):
                if len(label_in) != self.params['word_cnt']:
                    print("warning: data block", len(label_in))
                    continue
                data_in += '.' * (self.params['word_cnt'] - len(data_in))
                data = []
                for step, (char, sign) in enumerate(zip(data_in, label_in)):
                    if char not in self.map:
                        self.map[char] = len(self.map)
                    data.append([self.map[char], int(sign)])
                dataset.append(data)

            print("#%04d file '%s', datasize %d*64 (%d)" %
                  (file_step, filename, len(dataset), len(dataset) * 64))
        random.shuffle(dataset)
        pickle.dump(dataset, open(path + "/dataset.pkl", "wb"))
        pickle.dump(self.map, open(self.params['save_path'] + "/map.pkl",
                                   "wb"))
        return dataset

    def train(self):
        dataset = {
            'test': self.process(self.params['test_path']),
            'train': self.process(self.params['train_path'])
        }
        batchset = [
            dataset['train'][step:step + self.params['batch_size']]
            for step in range(
                0, len(dataset['train']) - self.params['batch_size'] + 1,
                self.params['batch_size'])
        ]
        self.net = Classifier(Network())
        self.optim = chainer.optimizers.Adam()
        self.optim.setup(self.net)
        for epoch in range(self.params['epoch']):
            for step, batch in enumerate(batchset):
                batch = np.array(batch, dtype=int)
                data = batch[:, :, 0]
                label = batch[:, :, 1]
                self.net.predictor.reset_state()
                self.net.cleargrads()
                loss, accuracy = self.net(data, label)
                loss.backward()
                self.optim.update()
                print("#{:08d} step(epoch {:02d}) loss={:.8f} accuracy={:.8f}".
                      format(step, epoch, loss.data, accuracy.data))
        self.save()

    def test(self, sentence):
        result, x = [], []
        for char in sentence:
            if char not in self.map:
                print(char, "not in map")
                return None
            else:
                x.append(self.map[char])
        self.net.reset_state()
        pred = self.net(np.array(x, dtype=int))

        buf = []
        for x, char in zip(pred.data, sentence):
            sign = np.where(x == x.max())[0][0]
            buf.append(char)
            if sign == 2:
                result.append("".join(buf))
                buf.clear()
        if buf:
            result.append("".join(buf))
        return result

    def save(self):
        chainer.serializers.save_npz(
            join(self.params['save_path'], self.params['name'] + '.model'),
            self.net.predictor)
        chainer.serializers.save_npz(
            join(self.params['save_path'], self.params['name'] + '.optim'),
            self.optim)

    def load(self):
        chainer.serializers.load_npz(
            join(self.params['save_path'], self.params['name'] + '.model'),
            self.net)
        self.map = pickle.load(
            open(join(self.params['save_path'], "map.pkl"), "rb"))
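
Finally, a minimal usage sketch for the Controller class. The params keys follow what the methods read; the concrete values are illustrative assumptions. Note that __init__ calls load(), so a previously saved <name>.model and map.pkl must already exist under save_path.

params = {
    'save_path': 'model',          # must already contain <name>.model and map.pkl
    'name': 'segmenter',
    'word_cnt': 64,                # fixed sentence length expected by process()
    'train_path': 'dataset/train',
    'test_path': 'dataset/test',
    'batch_size': 32,
    'epoch': 10,
}
controller = Controller(params)
controller.train()
sentence = '...'  # every character must already appear in map.pkl
print(controller.test(sentence))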