Exemplo n.º 1
0
                                              train_data_dup_count,
                                              replace=True)
# Oversample: pull the randomly chosen positive rows out of train_data
train_data_addon = train_data[train_data_true_sample_idx]

# make sure that all the addon have true labels
# (each sample's element [1] is the label; 1 == positive class)
assert all([x[1] == 1 for x in train_data_addon])

# stack the addon to the original training data and shuffle again
# so the duplicated positives are spread evenly through the epoch
train_data = np.concatenate((train_data, train_data_addon), axis=0)
train_data_size = len(train_data)
shuffle_idx = np.random.permutation(train_data_size)
train_data = train_data[shuffle_idx]

# init model on the GPU (no CPU fallback -- assumes CUDA is available)
model = ConvNet()
model = model.cuda()

# BCELoss expects sigmoid outputs in [0, 1]; weight_decay adds L2 regularization
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=1e-5 * 2,
                             weight_decay=1e-2)

# train loop
# use k-fold validation: iteration i holds out the i-th contiguous fold
k_fold = 10
fold_size = int(train_data_size // k_fold)
for i in range(k_fold):

    # split data into train/val: fold i is validation, the rest is training
    val_data_curr_fold = train_data[i * fold_size:(i + 1) * fold_size]
    train_data_curr_fold_head = train_data[:i * fold_size]
Exemplo n.º 2
0
    # Specify the type of model
    # NOTE(review): an unrecognized model_type leaves `model` unbound and
    # the optimizer line below would raise NameError -- consider an else
    # branch that raises ValueError
    if model_type == 'conv':
        model = ConvNet()
    elif model_type == 'fully':
        model = Fully()

    # Set the type of gradient optimizer and the model it update
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    # Choose loss function (CrossEntropyLoss expects raw logits + int labels)
    criterion = nn.CrossEntropyLoss()

    # Check if GPU is available, otherwise CPU is used
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model.cuda()

    # Four lists to plot the learning curve (filled once per epoch)
    train_loss = []
    train_acc = []
    validation_loss = []
    validation_acc = []

    # Run any number of epochs you want
    ep = 10
    for epoch in range(ep):
        print('Epoch:', epoch)
        ##############
        ## Training ##
        ##############
Exemplo n.º 3
0
    # Load the pickled character dictionary; the number of output classes
    # is its size
    with open('char_dict', 'rb') as f:
        class_dict = pickle.load(f)
    num_classes = len(class_dict)

    # Load the data: resize every image to 64x64 and convert to a tensor
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])
    dataset = HWDB(path=data_path, transform=transform)
    print("训练集数据:", dataset.train_size)
    print("测试集数据:", dataset.test_size)
    trainloader, testloader = dataset.get_loader(batch_size)

    # Build the network, move it to the GPU if available, and resume from
    # the epoch-9 checkpoint (the training loop below restarts at epoch 10)
    net = ConvNet(num_classes)
    if torch.cuda.is_available():
        net = net.cuda()
    net.load_state_dict(torch.load('checkpoints/handwriting_iter_009.pth'))

    print('网络结构:\n')
    # summary(net, input_size=(3, 64, 64), device='cuda')
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=lr)
    writer = SummaryWriter(log_path)
    # Train/validate from epoch 10 onward; checkpoint after every epoch
    for epoch in range(10, epochs):
        train(epoch, net, criterion, optimizer, trainloader, writer=writer)
        valid(epoch, net, testloader, writer=writer)
        print("epoch%d 结束, 正在保存模型..." % epoch)
        torch.save(net.state_dict(),
                   save_path + 'handwriting_iter_%03d.pth' % epoch)
Exemplo n.º 4
0
# preprocess data: build train/test datasets (traintest='TEST' selects the
# test split) and wrap each in a batched loader
training_set = TimitDataset('./data', labels, stepsize, freq_bins, frame_step, frame_size)
trainloader = get_batch_data(training_set, batch_size)

test_set = TimitDataset('./data', labels, stepsize, freq_bins, frame_step, frame_size, traintest='TEST')
testloader = get_batch_data(test_set, batch_size)

# NOTE(review): torch.cuda.device(0) returns a context manager, not a
# device handle, and `device` is never used in this excerpt --
# torch.device('cuda:0') was probably intended; confirm against callers
device = torch.cuda.device(0)

# Two models trained on the same data: a capsule network ...
capsnet = CapsuleNet(num_classes=nr_classes)
capsnet.cuda()

capsnet_optimizer = optim.Adam(capsnet.parameters())

# ... and a plain CNN baseline
convnet = ConvNet(num_classes = nr_classes)
convnet.cuda()

convnet_loss = torch.nn.MSELoss()
convnet_optimizer = optim.Adam(convnet.parameters())

def train_model(model, optimizer, num_epochs=10):
    """Train `model` with `optimizer` on the module-level trainloader,
    tracking running loss/accuracy per epoch.

    (The function body continues past this excerpt.)
    """

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        model.train()
        running_loss = 0.0
        running_accuracy = 0.0

        for idx, (inputs, labels) in enumerate(trainloader, 0):
Exemplo n.º 5
0
import cv2
import torch
from torchsummary import summary
from torchvision.transforms import ToTensor
import numpy as np
from tensorboardX import SummaryWriter
from model import ConvNet
from widerface import WIDERFaceDetection
from augmentations import SSDAugmentation

if __name__ == "__main__":
    # Build the detector, load pretrained weights, switch to inference mode
    net = ConvNet()
    net.load_state_dict(torch.load('no_gassuion_epoch240.pth'))
    net = net.eval()
    if torch.cuda.is_available():
        net = net.cuda()
    # NOTE(review): summary() calls .cuda() unconditionally, so this line
    # fails on a CPU-only machine even though the guard above exists
    summary(net.cuda(), input_size=(3, 640, 640), batch_size=1, device='cuda')

    WIDERFace_ROOT = r"F:\Datasets\人脸识别\WIDERFACE"
    dataset = WIDERFaceDetection(WIDERFace_ROOT)

    writer = SummaryWriter('eval_log')

    # Alternative input sources, kept for reference:
    # img = dataset.pull_image(1144)
    img = cv2.imread('2.jpg')
    # cv2.waitKey()
    # _, img = cv2.VideoCapture(0).read()
    # img = cv2.resize(img, (640, 640))

    # Keep an untouched copy for drawing; ToTensor converts the HxWxC
    # uint8 image to a CxHxW float tensor in [0, 1], unsqueeze adds the
    # batch dimension
    src = img.copy()
    img = ToTensor()(img).unsqueeze(0)
Exemplo n.º 6
0
class Wrapper(object):
    """Training/evaluation wrapper for a ConvNet kanji classifier.

    Reads hyper-parameters from a JSON config file, optionally resumes
    from a saved checkpoint, and moves the model to the GPU when one is
    available.
    """

    def __init__(self, config, cont=None):
        """
        Args:
            config: path to a JSON configuration file.
            cont: None to start fresh; the string 'cont' to resume from
                the best checkpoint; any other string is treated as a
                checkpoint filename inside model_save_path.
        """
        super(Wrapper, self).__init__()
        with open(config, 'r') as f:
            config = json.load(f)
        self.config = config
        self.best_path = str(self.config['model']['model_save_path'] +
            self.config['name'] + '_model_best.pt')
        self.model = ConvNet(config['model'])
        self.continuing = False
        if cont is not None:
            print('loading in weights')
            self.load_model(cont)
            self.continuing = True

        self.cuda = torch.cuda.is_available()
        if self.cuda:
            print('using cuda')
            self.model.cuda()

    def train(self):
        """Train the model, checkpointing whenever validation accuracy
        improves, with early stopping after `max_past` epochs without
        improvement."""
        model = self.model
        config = self.config
        trainloader = DataLoader(
            KanjiDataset(self.config, train=True),
                batch_size=config['train']['batch_size'], shuffle=True, pin_memory=True)
        self.valset = KanjiDataset(self.config, train=False)
        objective = nn.CrossEntropyLoss()
        self.objective = objective
        optimizer = optim.Adam(model.parameters(), lr=config['train']['learning_rate'])

        # When resuming, start from the checkpoint's validation accuracy
        # so a worse model never overwrites a better saved one.
        bestacc = 0.0 if not self.continuing else self.eval()[0]
        past_best = 0
        max_past = 50  # early-stopping patience, in epochs
        for e in range(config['train']['epochs']):
            avgloss = 0.0
            for i, (x, y) in enumerate(trainloader):
                if self.cuda:
                    # FIX: `.cuda(async=True)` is a SyntaxError since
                    # Python 3.7 (`async` is a reserved keyword); the
                    # PyTorch argument was renamed to `non_blocking`.
                    x = x.cuda(non_blocking=True)
                    y = y.cuda(non_blocking=True)

                optimizer.zero_grad()
                preds = model(x)
                loss = objective(preds, y)
                avgloss += loss.item()
                loss.backward()
                optimizer.step()

                # Drop the graph reference before collecting to keep
                # per-iteration memory flat.
                preds = None
                gc.collect()
            avgloss /= len(trainloader)
            vacc = self.eval()[0]
            if e%5==0:
                print('epoch: {}, loss: {:.4f}, val_acc: {:.4f}'
                    .format( e+1,       avgloss,           vacc ) )
            if vacc > bestacc:
                # Save both a named snapshot and the rolling "best" file.
                path = str(self.config['model']['model_save_path'] +
                    self.config['name'] + '_model_{:.4f}.pt'.format(vacc))
                self.save_model(path)
                self.save_model(self.best_path)
                bestacc = vacc
                past_best = 0
            else:
                past_best += 1
            if past_best >= max_past:
                print('past')
                break

        self.valloader = None
        self.print_acc()
        return

    def valid(self):
        """Return the average objective loss over self.valloader.

        NOTE(review): train() sets self.valloader to None, so this method
        is only usable if a loader is assigned elsewhere -- confirm.
        """
        loss = 0.0
        for (x, y) in self.valloader:
            if self.cuda:
                x = x.cuda(non_blocking=True)  # was async=True; see train()
                y = y.cuda(non_blocking=True)
            loss += self.objective(self.model(x), y).item()
        return loss/len(self.valloader)

    def eval(self, train=False):
        """Return (accuracy, confusion_matrix) over the validation data.

        When train=True the valset cached by train() is reused; otherwise
        a fresh validation dataset is constructed.
        """
        validset = self.valset if train else KanjiDataset(self.config, train=False)
        acc = 0
        conf = np.zeros((self.config['model']['classes'],
            self.config['model']['classes']), dtype=np.int32)
        for (x, y) in validset:
            pred = self.predict(x)
            acc += (pred == y)
            conf[y, pred] = conf[y, pred] + 1
        return acc/len(validset), conf

    def print_acc(self):
        """Print validation accuracy and the full confusion matrix."""
        acc, conf = self.eval()
        print('acc:', acc)
        print('conf:\n', conf)

    def predict(self, image):
        """Return the predicted class index (int) for one image tensor."""
        image = torch.unsqueeze(image, 0)  # add batch dimension
        if self.cuda:
            image = image.cuda(non_blocking=True)  # was async=True; see train()
        pred = self.model(image)
        pred = torch.argmax(pred[0])
        return pred.item()

    def save_model(self, path):
        """Persist the model weights to `path`."""
        torch.save( self.model.state_dict(), path )
        print('save:', path)

    def load_model(self, cont):
        """Load weights: 'cont' selects the best checkpoint, any other
        value is a filename inside model_save_path."""
        path = self.best_path
        if cont != 'cont':
            path = join(self.config['model']['model_save_path'], cont)
        print('loading path:', path)
        self.model.load_state_dict( torch.load( path ) )