Exemplo n.º 1
0
    def __init__(self, train_path, test_path, model_file, model, img_h=32, img_w=110, batch_size=64, lr=1e-3,
                 use_unicode=True, best_loss=0.2, use_gpu=True, workers=1):
        """Set up an OCR/CRNN-style trainer: model, CTC loss, loaders, optimizer.

        Args:
            train_path: directory with training images plus 'labels_normal.txt'.
            test_path: directory with test images plus 'labels_normal.txt'.
            model_file: checkpoint path; loaded via self.load() if it exists.
            model: the network to train.
            img_h, img_w: size every image is resized to by the transform.
            batch_size, lr, workers: loader / optimizer hyper-parameters.
            use_unicode: stored flag; its consumer is outside this view.
            best_loss: starting best-loss threshold (presumably used for
                checkpointing decisions elsewhere — TODO confirm).
            use_gpu: move model and criterion to CUDA when True.
        """
        self.model = model
        self.model_file = model_file
        self.use_unicode = use_unicode
        self.img_h = img_h
        self.img_w = img_w
        self.batch_size = batch_size
        self.lr = lr
        self.best_loss = best_loss
        # Hard-coded starting accuracy bar; only runs above it count as "best".
        self.best_acc = 0.95
        self.use_gpu = use_gpu
        self.workers = workers

        # Converts between strings and label index sequences for the CTC loss.
        self.converter = utils.strLabelConverter(alphabet)
        self.criterion = CTCLoss()

        if self.use_gpu:
            print("[use gpu] ...")
            self.model = self.model.cuda()
            self.criterion = self.criterion.cuda()
        if torch.cuda.is_available() and not self.use_gpu:
            print("[WARNING] You have a CUDA device, so you should probably run with --cuda")

        # Load the checkpoint (after the .cuda() move above, so loaded weights
        # land on the right device).
        if os.path.exists(self.model_file):
            self.load(self.model_file)
        else:
            # NOTE(review): despite saying "error", training simply continues
            # from scratch when no checkpoint file exists.
            print('[Load model] error !!!')

        self.transform = T.Compose([
            T.Resize((self.img_h, self.img_w)),
            T.ToTensor(),
            # T.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])
        ])

        # Train loader is shuffled; test loader is not.
        train_label = os.path.join(train_path, 'labels_normal.txt')
        train_dataset = my_dataset.MyDataset(root=train_path, label_file=train_label, transform=self.transform,
                                             is_train=True, img_h=self.img_h, img_w=self.img_w)
        self.train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=self.batch_size,
                                                        shuffle=True, num_workers=int(self.workers))
        test_label = os.path.join(test_path, 'labels_normal.txt')
        test_dataset = my_dataset.MyDataset(root=test_path, label_file=test_label, transform=self.transform,
                                            is_train=False, img_h=self.img_h, img_w=self.img_w)
        self.test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=self.batch_size,
                                                       shuffle=False, num_workers=int(self.workers))

        # setup optimizer
        # if opt.adam:
        #     self.optimizer = optim.Adam(crnn.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        # elif opt.adadelta:
        #     self.optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)
        # else:
        #     self.optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=1e-5)
Exemplo n.º 2
0
# Experiment switches and hyper-parameters for this script.
IF_WANDB = 0       # 1 -> log this run to Weights & Biases
IF_SAVE = 0        # 1 -> save artifacts (consumer is outside this view)
LAYER_UNITS = 2000
LAYERS = 3
CLASS = 10
BATCH_SIZE = 100
NAME = 'neural_400_100'
WORKERS = 15

# wandb is only imported when enabled, so the dependency stays optional.
if IF_WANDB:
    import wandb
    wandb.init(project='neural', name=NAME)

#dataset = my_dataset.MyDataset(train = True, margin = 3, noise_rate = 0.05)
dataset_test = my_dataset.MyDataset(train=False)
#data_feeder = my_dataset.DataFeeder(dataset, BATCH_SIZE, num_workers = WORKERS)
# Materialize the whole test split in one call (presumably tensors — see
# my_dataset.MyDataset.get_all for the actual return types).
images_t, labels_t = dataset_test.get_all()

class Quantized(torch.autograd.Function):
    """Binarize activations with a straight-through estimator (STE).

    Forward maps each element to 1.0 when >= 0, else 0.0. Backward passes
    the incoming gradient straight through, zeroed where |input| > 1 (the
    standard STE clipping window).
    """

    @staticmethod
    def forward(ctx, input):
        # Keep the raw input so backward can build the clipping mask.
        ctx.save_for_backward(input)
        return (input >= 0).float()

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        # Block gradients where the input is outside [-1, 1].
        grad_input[(input.abs() > 1)] = 0
        # BUG FIX: the original fell off the end of backward and implicitly
        # returned None, silencing all gradients; return the masked gradient.
        return grad_input
Exemplo n.º 3
0
import math
import cv2

# Experiment configuration.
IF_WANDB = 1
IF_SAVE = 1
SIX = 6            # bits per lookup-table input; 2**SIX possible patterns
BATCH_SIZE = 1000
WORKERS = 15
CLASS = 10
import cv2  # NOTE(review): duplicate of the import above; harmless but removable

if IF_WANDB:
    import wandb
    wandb.init(project='lut_hard')  #, name = '.')

dataset = my_dataset.MyDataset(train=True, margin=2, noise_rate=0.01)
dataset_test = my_dataset.MyDataset(train=False)
data_feeder = my_dataset.DataFeeder(dataset, BATCH_SIZE, num_workers=WORKERS)
images_t, labels_t = dataset_test.get_all()

# For every SIX-bit pattern and each bit position, precompute the [low, high]
# change in value caused by flipping that bit (presumably stored into
# delta_map — NOTE(review): the loop body is truncated in this view and the
# assignment is not visible).
delta_map = torch.zeros(2**SIX, SIX, 2).long().cuda()
for i in range(2**SIX):
    # SIX-character binary string for pattern i, e.g. '000101' for SIX=6.
    bins = ('{0:0%db}' % SIX).format(i)
    for j in range(SIX):
        k = 2**j  # weight of bit j
        if bins[SIX - 1 - j] == '1':
            # Bit j is set: flipping it can only lower the value by k.
            low = -k
            high = 0
        else:
            # Bit j is clear: flipping it can only raise the value by k.
            low = 0
            high = k
Exemplo n.º 4
0
    # Deterministic split: the first `train_test_split` fraction of `files`
    # (in their current order) becomes training data, the remainder test data.
    train_test_split = config['data']['train_test_split']
    split_index = int(len(files)*train_test_split)
    train_files = files[:split_index]
    test_files = files[split_index:]
    # test_files = [f for f in files if np.load(label_dir + "/" + f + ".npy")[1] in range(6,8)]

    # print(test_files)

    print(f"Training samples: {len(train_files)}")
    print(f"Test samples: {len(test_files)}")
    # print(train_files[0:20])
    # print(test_files)

    # print(np.load(data_dir + "/" + files[0] + ".npy").shape)

    train_dataset = my_dataset.MyDataset(config, train_files)
    test_dataset = my_dataset.MyDataset(config, test_files)

    batch_size = config['model']['batch_size']

    # Paired loaders built by the project helper (variable-length batching,
    # presumably — see my_dataset.make_variable_dataloader).
    train_loader, test_loader = my_dataset.make_variable_dataloader(train_dataset,
                                                                    test_dataset,
                                                                    batch_size = batch_size)

    print("Performing whole network training.")
    # Solver resumes from args.checkpoint when one is given.
    s = solver.Solver(train_loader, test_loader, config, load_dir = args.checkpoint)

    if args.load_emo is not None:
        # Optionally warm-start the emotion classifier from its own checkpoint.
        s.model.load_pretrained_classifier(args.load_emo, map_location = 'cpu')
        print("Loaded pre-trained emotional classifier.")
Exemplo n.º 5
0
# Wrap in DataParallel first, then move both networks to the target device.
if cuda:
    generator = DataParallel(generator)
    generator.to(device_for_model)
    discriminator = DataParallel(discriminator)
    discriminator.to(device_for_model)

# Initialize weights
generator.apply(my_model.weights_init_normal)
discriminator.apply(my_model.weights_init_normal)

# Images are resized to 256x256 and converted to tensors; no normalization.
transform_train = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
])
dataset = my_dataset.MyDataset(data_path=data_dir, transform=transform_train)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=0)

# Optimizers
# The discriminator runs at 4x the generator learning rate (two-timescale
# training, presumably — TODO confirm intent).
optimizer_G = torch.optim.Adam(generator.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(),
                               lr=opt.lr * 4,
                               betas=(opt.b1, opt.b2))
# Joint optimizer over both networks' parameters. NOTE(review): this
# statement is truncated in this view (the call is never closed).
optimizer_info = torch.optim.Adam(itertools.chain(generator.parameters(),
                                                  discriminator.parameters()),
                                  lr=opt.lr,
Exemplo n.º 6
0
Arquivo: train.py Projeto: oycq/myros
        sum_count = 0
        for j, (inputs, ground_truth) in enumerate(test_loader):
            if CUDA:
                inputs = inputs.cuda()
                ground_truth = ground_truth.cuda()
            outputs = model(inputs)
            # A sample is correct when thresholding the first output column at
            # 0.5 agrees with the binary label in ground-truth column 0.
            correct_count += ((outputs[:, 0] >
                               0.5) == (ground_truth[:, 0] == 1)).sum().item()
            # Relies on drop_last=True so every batch has exactly BATCH_SIZE.
            sum_count += BATCH_SIZE


#            bar.update(j)
#        bar.finish()
    return correct_count / sum_count * 100

train_set = my_dataset.MyDataset('train')
test_set = my_dataset.MyDataset('test')
# drop_last=True keeps every batch exactly BATCH_SIZE, which the accuracy
# computation in this file depends on (it counts BATCH_SIZE per batch).
train_loader = torch.utils.data.DataLoader(train_set, BATCH_SIZE,\
        shuffle = True, num_workers = 5, drop_last =  True)
test_loader = torch.utils.data.DataLoader(test_set, BATCH_SIZE,\
        shuffle = True, num_workers = 5, drop_last =  True)
loss_f1 = nn.BCELoss()   # binary-classification loss term
loss_f2 = nn.MSELoss()   # regression loss term
m = nn.Sigmoid()
mm = nn.ReLU()

# Effectively train forever; stopping is manual. NOTE(review): the loop body
# is truncated in this view.
for epoch in range(20000000):
    print('----- epoch %d -----' % epoch)
    time0 = time.time()
    #    test_result = test()
    #    print(test_result)