Example #1
import numpy as np
import torch
from torchsummary import summary  # assumption: summary() from the torchsummary package

from models import DenseNet  # assumption: project-local efficient DenseNet implementation


class LineFilter:
    def __init__(self):
        self.model = DenseNet(growth_rate=8,
                              block_config=(2, 2, 2),
                              bn_size=4,
                              drop_rate=0,
                              num_init_features=8 * 2,
                              small_inputs=True,
                              efficient=True)
        # Load the best checkpoint onto the CPU, then switch to inference mode.
        self.model.load_state_dict(
            torch.load("save/param_best.pth",
                       map_location=lambda storage, loc: storage))
        self.model.eval()
        summary(self.model, input_size=(3, 480, 640))

    def predict(self, input_data):
        # Forward pass; drop singleton batch/channel dimensions from the output map.
        output = self.model(input_data).squeeze()
        # Clamp responses to [0, 255] and zero out anything below the threshold.
        output[output > 255] = 255
        output[output < 150] = 0
        output = output.detach().numpy()
        return output.astype(dtype=np.uint8)
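
A minimal usage sketch for the class above. The image file, the OpenCV dependency, and the resize to the 480x640 shape passed to summary() are all assumptions, not part of the original example:

import cv2  # assumption: OpenCV is available for image I/O

line_filter = LineFilter()
image = cv2.imread("frame.png")                # hypothetical input image, HxWx3
image = cv2.resize(image, (640, 480))          # match the expected 480x640 input
tensor = torch.from_numpy(image).float()       # HWC uint8 -> float32 tensor
tensor = tensor.permute(2, 0, 1).unsqueeze(0)  # -> NCHW, batch size 1
mask = line_filter.predict(tensor)             # uint8 map: 0 or values in [150, 255]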
Example #2
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn

# Rename the baseline state-dict keys to the layer names used by the
# memory-efficient variant (norm1/conv1 live inside a bottleneck module there).
state = model.state_dict()
state = OrderedDict(
    (k.replace('.norm1.', '.bottleneck.norm_'), v) for k, v in state.items())
state = OrderedDict(
    (k.replace('.conv1.', '.bottleneck.conv_'), v) for k, v in state.items())

# The renamed weights now load cleanly into the efficient variant.
model_effi.load_state_dict(state)
if use_cuda:
    model.cuda()
    model_effi.cuda()
    cudnn.deterministic = True  # deterministic kernels keep the two models comparable
    if multigpus:
        model = nn.DataParallel(model, device_ids=[0, 1])
        model_effi = nn.DataParallel(model_effi, device_ids=[0, 1])
if is_eval:
    model.eval()
    model_effi.eval()
# create the model inputs
input_var = torch.randn(8, 3, 32, 32)
if use_cuda:
    input_var = input_var.cuda()

# Forward/backward through the baseline model and record every gradient.
out = model(input_var)
model.zero_grad()
out.sum().backward()
param_grads = OrderedDict()

for name, param in model.named_parameters():
    assert param.grad is not None, name
    param_grads[name] = param.grad.data
# Forward through the efficient model for comparison against the baseline.
out_effi = model_effi(input_var)
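
The excerpt stops right after the efficient forward pass. A hedged sketch of the comparison that would naturally follow, assuming the two models are meant to be numerically equivalent; the tolerance and the reverse name mapping are assumptions:

model_effi.zero_grad()
out_effi.sum().backward()
assert torch.allclose(out, out_effi, atol=1e-5), "outputs diverge"
for name, param in model_effi.named_parameters():
    # Map the efficient layer names back to the baseline names recorded above.
    base_name = name.replace('.bottleneck.norm_', '.norm1.') \
                    .replace('.bottleneck.conv_', '.conv1.')
    assert torch.allclose(param_grads[base_name], param.grad.data, atol=1e-5), name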
Example #3
File: train.py  Project: johnrayn/hackaway
for epoch in range(config.epochs):
    for i, (input, target) in enumerate(train_data_iterator()):
        input = torch.FloatTensor(input)
        target = torch.LongTensor(target)
        # Move batches to the GPU; non_blocking overlaps the copy with compute.
        input_var = input.cuda(non_blocking=True)
        target_var = target.cuda(non_blocking=True)
        # feed
        output = net(input_var)
        loss = loss_fn(output, target_var)  # compare against the GPU copy of the labels
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # accuracy
        if (i + 1) % 100 == 0:
            net.eval()
            data, label = test_data_helper()
            data, label = data.cuda(), label.cuda()
            with torch.no_grad():
                output = net(data)
                batch_size = output.size(0)
                pred = torch.argmax(output.view(-1, config.captlen,
                                                config.charlen),
                                    dim=2)
                label = torch.argmax(label.view(-1, config.captlen,
                                                config.charlen),
                                     dim=2)
                accuracy = torch.eq(
                    pred, label).sum().item() / (batch_size * config.captlen)
            net.train()  # restore training mode after the evaluation pass
            if accuracy > best_acc:
                best_acc = accuracy
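                # Hypothetical follow-up, not in the original excerpt: persist
                # the best weights so far. The save path is an assumption.
                torch.save(net.state_dict(), "save/best.pth")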