Example #1
def main(argv=None):
    if argv is None:
        argv = sys.argv
    filepath = r"F:\FCD data\cluster\cluster_region_1.npy"  # raw string avoids invalid escape sequences in the Windows path
    trajectory_cluster_region = list(np.load(filepath))

    training_data = []
    test_data = []
    all_count = len(trajectory_cluster_region)
    train_data_length = int(all_count * 0.9)
    count = 0
    labels = []
    for trajectory in trajectory_cluster_region:
        count = count + 1
        clusters_str, regions_str = trajectory.strip().split(';')
        clusters = literal_eval(clusters_str)
        regions = literal_eval(regions_str)

        clusters, regions = validate_trajectory(clusters, regions)
        if len(clusters) < 5 or len(regions) < 5:
            continue

        if count < train_data_length:
            training_data.append((regions, clusters))
        else:
            test_data.append((regions, clusters))
        labels.extend(clusters)

    word_to_ix = {}
    for sentence, tags in training_data:
        for word in sentence:
            if word not in word_to_ix:
                word_to_ix[word] = len(word_to_ix)

    # labels = list(np.load(r"F:\FCD data\cluster\destination_labels.npy"))
    count = 0
    tag_to_ix = {}
    for label in labels:
        if label not in tag_to_ix:
            tag_to_ix[label] = count
            count += 1
    tag_to_ix[START_TAG] = count
    tag_to_ix[STOP_TAG] = count + 1

    model = torch.load("prediction_model.pkl")

    # Check predictions before training
    with torch.no_grad():
        precheck_sent = prepare_sequence(test_data[2][0], word_to_ix)
        precheck_tags = torch.tensor([tag_to_ix[t] for t in test_data[2][1]],
                                     dtype=torch.long)

        y = model(precheck_sent)
        print(y)
        make_dot(y)
        print(precheck_tags)
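
Example #1 calls a `prepare_sequence` helper that the snippet does not define. A minimal sketch, assuming the usual tutorial-style definition (a hypothetical reconstruction, not necessarily this project's code); note that `word_to_ix` is built from the training data only, so unseen test tokens would raise a KeyError here:

def prepare_sequence(seq, to_ix):
    # Look up each token's index and return the indices as a LongTensor.
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)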
Example #2
    def run(self, train_loader, test_loader, loss_fn):
        
        try:
            from visualize import make_dot
            y = self.net(Variable(torch.from_numpy(test_loader.dataset[0]['image'])))  # call the module, not .forward(), so hooks run
            g = make_dot(y)
            g.engine='dot'
            g.format='pdf'
            print(g.render(filename=os.path.join(self.log_dir, 'net.gv')))
        except Exception:
            logger.warn('failed to draw net.')
        

        logger.check_eq(self.done, False, 'Done already!')
        if self.cuda:
            self.net.cuda()

        logger.info('Network Architecture:')
        print(str(self.net))
        sys.stdout.flush()

        logger.info('{} Hyperparameters:'.format(self.solver.__class__.__name__))
        print(str(self.solver.defaults))
        sys.stdout.flush()

        logger.info('Initial test with random initialized parameters:')
        self.test(epoch=0, loader=test_loader, loss_fn=loss_fn)
        for epoch in range(1, self.total_epochs+1):
            self.train(epoch=epoch, loader=train_loader, loss_fn=loss_fn)
            self.test(epoch=epoch, loader=test_loader, loss_fn=loss_fn)
            self.invoke_epoch_callback()
        self.save_stats()
        self.done = True
Example #3
    def inspect(self):

        print(dir(self.dqn.conv1))

        for param in list(self.dqn.parameters()):
            print(param.size())

        print("conv2", self.dqn.conv2.kernel_size)
        print("conv3", self.dqn.conv3.kernel_size)
        print("lin1", self.dqn.affine1.in_features,
              self.dqn.affine1.out_features)
        print("lin2", self.dqn.affine2.in_features,
              self.dqn.affine2.out_features)
        weights = self.dqn.affine1.weight
        print(self.dqn.affine1.weight)
        print((self.dqn.affine1.weight[:, 1024:]))
        import visualize
        inputs = torch.randn(1, 4, 64, 64)
        inputs2 = torch.zeros(4, 12)  # additional (non-image) state input
        print("state", self.dqn.state_dict())
        y = self.dqn(Variable(inputs), Variable(inputs2))
        g = visualize.make_dot(y, dict(self.dqn.named_parameters()))  # make_dot expects a name->parameter dict, not the state_dict method
        g.view()
Example #4
    def train_tree(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        self.tree = dt.decision_tree().cuda()
        print(self.tree)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(self.tree.parameters(), lr=0.001, momentum=0.9)

        for epoch in range(
                n.tree_trainer_epoch):  # loop over the dataset multiple times
            running_loss = 0.0
            for i, data in enumerate(pre.trainloader, 0):
                inputs, labels = data
                inputs, labels = Variable(inputs.cuda()), Variable(
                    labels.cuda())
                optimizer.zero_grad()
                # forward + backward + optimize
                outputs = self.tree(inputs)
                g = v.make_dot(outputs)  # rebuilt every batch; only the final graph is viewed at the end
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # print statistics
                running_loss += loss.item()  # .item() replaces the deprecated loss.data[0]
                # update leaf loss statistics
                self._update_leaf_loss(
                    loss.item(), self.tree.exit_node
                )  # must be called after backward() for an accurate exit node

                if i % 2000 == 1999:  # print every 2000 mini-batches
                    print('[%d, %5d] loss: %.3f' %
                          (epoch + 1, i + 1, running_loss / 2000))
                    running_loss = 0.0
            # grow the tree once the epoch count reaches 3x its current size
            if self.tree.size * 3 == epoch:
                self._grow_tree()
        print('Finished Training')
        g.view()
Example #5
                              dtype=dtype,
                              ltype=ltype,
                              sampling_rate=sampling_rate)

# In[4]:

from visualize import make_dot
from torch.autograd import Variable

input = Variable(torch.rand(1, 1, 256))
output = model(input)
params = dict(model.named_parameters())
#output.backward()
print(output)

make_dot(output, params)
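
# Note: make_dot returns a graphviz Digraph. In a notebook the returned object
# renders inline; in a plain script it must be rendered explicitly. A minimal
# sketch, with an illustrative filename:
# g = make_dot(output, params)
# g.format = 'pdf'
# g.render('wavenet_graph')  # writes wavenet_graph (dot source) and wavenet_graph.pdf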

# In[5]:

print("output length: ", model.output_length)

data_loader.start_new_epoch()
start_data = data_loader.get_wavenet_minibatch([model.receptive_field],
                                               model.receptive_field,
                                               model.output_length)[0]
start_data = start_data.squeeze()

plt.ion()
plt.plot(start_data[-200:].numpy())
plt.ioff()
Example #6
File: net.py  Project: zymale/yousan.ai
        self.conv1 = nn.Conv2d(3, 12, 3, 2)  # not in the original excerpt; inferred from forward() and the 3x48x48 input
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(12, 24, 3, 2)
        self.bn2 = nn.BatchNorm2d(24)
        self.conv3 = nn.Conv2d(24, 48, 3, 2)
        self.bn3 = nn.BatchNorm2d(48)
        self.fc1 = nn.Linear(48 * 5 * 5, 1200)
        self.fc2 = nn.Linear(1200, 128)
        self.fc3 = nn.Linear(128, 2)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        #print "bn1 shape",x.shape
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = x.view(-1, 48 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


if __name__ == '__main__':
    import torch
    from torch.autograd import Variable
    from visualize import make_dot
    x = Variable(torch.randn(1, 3, 48, 48))
    model = simpleconv3()
    y = model(x)
    g = make_dot(y)
    g.view()
Example #7
        feat_1 = self.block1(feat)
        feat_2 = self.block2(feat_1)

        fine_1 = F.relu(self.fine_bn1(self.fine_conv1(feat)))
        fine_2 = F.relu(self.fine_bn2(self.fine_conv2(fine_1)))
        fine_3 = F.relu(self.fine_bn3(self.fine_conv3(fine_2)))

        out = feat_2 + fine_3
        out = F.relu(self.combine_bn1(self.combine_conv1(out)))
        out = self.combine_conv2(out)

        return out


class networkNormal(nn.Module):
    def __init__(self, inplane_normal):
        super(networkNormal, self).__init__()
        self.normalNetwork = basicNetwork(inplane_normal, 3)

    def forward(self, in_normal):
        normal = self.normalNetwork(in_normal)
        return normal


if __name__ == '__main__':
    import torch
    from torch.autograd import Variable
    from visualize import make_dot  # imports assumed, matching the sibling examples; HourglassNet is defined above this excerpt
    net = HourglassNet()
    x = Variable(torch.Tensor(1, 3, 224, 224))
    albedo, norm, lighting, shading = net(x)
    g = make_dot(shading)
    g.render('HourglassNet')
Example #8
def ConvertModel_caffe(pytorch_net,
                       InputShape,
                       softmax=False,
                       use_cuda=False,
                       save_graph=False):
    """ Pytorch to Caffe, only support single tensor input """
    import os
    #import caffe_pb2 as pb2
    from caffe.proto import caffe_pb2 as pb2
    from ConvertLayer_caffe import convert_caffe
    """ Need forward once """
    global inputs
    n, c, h, w = InputShape
    if use_cuda:
        print('Convert in cuda...')
        if torch.cuda.is_available():
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        pytorch_net = pytorch_net.cuda()
        inputs = Variable(torch.zeros(n, c, h, w).cuda(), requires_grad=True)
    else:
        inputs = Variable(torch.zeros(n, c, h, w), requires_grad=True)

    pytorch_net.eval()
    outputs = pytorch_net(inputs, phase='eval')  #phase='eval'
    print('model outputs:')
    if isinstance(outputs, (tuple, list)):
        #print(outputs[0].shape)  # (1, 10830, 4)
        print(outputs[0][-1])  # last loc
    else:
        print(outputs)

    if save_graph:
        from visualize import make_dot
        model_name = str(pytorch_net.__class__.__name__) + '_caffe'
        dot = make_dot(outputs)
        with open("{}.dot".format(model_name), "w") as fp:
            fp.write(str(dot))  # Python 3 replacement for the original `print >> fp, dot`

    if softmax:
        import torch.nn as nn
        regularize = nn.Softmax()
        outputs = regularize(outputs)
    """ Travel computational graph in backward order """
    """ Need to count number of tops(indegree) of all nodes first """
    global visited, tops_dict, layer_type_count, dst
    global slice_point, multi_tops, axis_dict
    visited = set()
    tops_dict = dict()
    layer_type_count = dict()
    slice_point = dict()
    multi_tops = dict()
    axis_dict = dict()
    dst = 'caffe'

    for out in outputs:
        FindMultiTops(out.grad_fn)
    """ Travel computational graph in backward order """
    global caffe_net
    global convert, link
    convert = convert_caffe
    link = link_caffe
    caffe_net = []

    visited = set()
    tops_dict = dict()
    layer_type_count = dict()

    for out in outputs:
        DFS(out.grad_fn)
    """ Caffe input """
    text_net = pb2.NetParameter()
    if os.environ.get("T2C_DEBUG"):
        text_net.debug_info = True
    """ Caffe layer parameters """
    binary_weights = pb2.NetParameter()
    binary_weights.CopyFrom(text_net)
    for layer in caffe_net:
        binary_weights.layer.extend([layer])

        layer_proto = pb2.LayerParameter()
        layer_proto.CopyFrom(layer)
        del layer_proto.blobs[:]
        text_net.layer.extend([layer_proto])

    return text_net, binary_weights
Example #9

net = GoogLeNet()
#===================================
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)

has_output = False
for epoch in range(5):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images)
        labels = Variable(labels)
        net.zero_grad()
        output = net(images)
        if not has_output:
            visualize.make_dot(output).render("graph")
            has_output = True
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        print("epoch:%d, batch:%d, loss:%.4f" % (epoch, i, loss.data[0]))

total = 0.0
correct = 0.0
for images, labels in test_loader:
    images = Variable(images)
    output = net(images)
    val, index = torch.max(output, 1)
    total += images.size(0)
    correct += (index.data == labels).sum().item()
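
The test loop accumulates `total` and `correct` but never reports them; a minimal closing line (an assumed addition, not in the original):

print('Test accuracy: %.2f%%' % (100.0 * correct / total))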
Example #10
    if model_name == 'resnet50':
        m = torchvision.models.resnet50(pretrained=True)
    elif model_name == 'vgg16':
        m = torchvision.models.vgg16()
        m.classifier.add_module('softmax', torch.nn.Softmax())
    elif model_name == 'resnet26d':
        m = timm.create_model('resnet26d', pretrained=True)  #.cuda()
    m.eval()  # very important here, otherwise batchnorm running_mean, running_var will be incorrect
    input_var = Variable(torch.rand(1, 3, 224, 224))

    print(m)
    output_var = m(input_var)
    dot = make_dot(output_var)
    with open("out.dot", "w") as fp:
        fp.write(str(dot))  # Python 3 replacement for the commented `print >> fp, dot`
    dot.render('test-output/round-table.gv', view=False)
    #exit(0)

    if model_name == 'resnet50':
        pytorch2caffe(input_var, output_var, 'resnet50-pytorch2caffe.prototxt',
                      'resnet50-pytorch2caffe.caffemodel')
    elif model_name == 'vgg16':
        pytorch2caffe(input_var, output_var, 'vgg16-pytorch2caffe.prototxt',
                      'vgg16-pytorch2caffe.caffemodel')
    elif model_name == 'resnet26d':
        pytorch2caffe(input_var, output_var,
                      'resnet26d-pytorch2caffe.prototxt',
                      'resnet26d-pytorch2caffe.caffemodel')
Example #11
    print(epoch)

    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data

        # wrap them in Variable
        inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        g = v.make_dot(outputs)  # rebuilt every batch; the final graph is viewed after training
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()  # .item() replaces the deprecated loss.data[0]
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

g.view()
print('Finished Training')
dataiter = iter(testloader)
images, labels = next(dataiter)  # Python 3: next(it), not it.next()
Example #12
# tr_scans = np.load("../tr_sacns.npy")
# tr_wcs = np.load("../tr_wcs.npy")
# tr_was = np.load("../tr_was.npy")
Xva = np.load("./Xva.npy")
# # Xte = np.load( "../Xte.npy" )
va_wcs = np.load("./va_wcs.npy")
# # te_wcs=np.load("../te_wcs.npy")
va_was = np.load("./va_was.npy")
# # te_was=np.load("../te_was.npy")
va_scans = np.load("./va_scans.npy")
#
EPOCH = 1
p1, p2 = 1, 1
for epoch in range(EPOCH):
    for step, (x, yOffs, yConf) in enumerate(train_loader):
        (outConf, outOffs) = net(x)
        g, p = make_dot(outConf), make_dot(outOffs)
        # g.view()
        p.view()
        yConfl = yConf.type(torch.LongTensor)
        # if step == 0:
        #     p1 = 1 / loss_softmax(outConf, yConfl)
        #     p2 = 1 / loss_offset(outOffs, yOffs)
        a, b = loss_softmax(outConf, yConfl), loss_offset(outOffs, yOffs)
        loss = a + b * 25
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step != 0:
            break
    # print("EPOCH : ",epoch," loss_softmax : ",a," loss_offset : ",b*25," loss_tatal : ", loss)
Example #13
#     optimizer.step()        # apply gradients
# Build the optimizer, scheduler, and loss once, outside the loops; the original
# recreated them on every batch, which reset the SGD momentum state each step.
optimizer = torch.optim.SGD(alexnet_model.classifier.parameters(),
                            lr=0.0001, momentum=0.9, weight_decay=0.0005)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1e-7)  # update lr once per epoch
criterion = nn.CrossEntropyLoss()
for epoch in range(num_epochs):
    batch_size_start = time.time()
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(train_loader):  # the original unpacked only `inputs` but used `labels`
        inputs = Variable(inputs)
        labels = Variable(labels)
        optimizer.zero_grad()
        outputs = alexnet_model(inputs)
        loss = criterion(outputs, labels)  # cross-entropy
        loss.backward()
        optimizer.step()  # update the weights
        running_loss += loss.item()
    scheduler.step()  # advance the lr schedule once per epoch

    print('Epoch [%d/%d], Loss: %.4f, need time %.4f'
          % (epoch + 1, num_epochs, running_loss / (4000 / batch_size), time.time() - batch_size_start))
# visualization
g = make_dot(outputs)  # make_dot expects a model output Variable, not the model itself
g.view()
# save the model and features
saveModelName = os.path.join(codeDirRoot, "model", "alexnet_model.pkl" + "_" + str(num_epochs))

torch.save(alexnet_model.state_dict(), saveModelName)
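
To reuse the saved weights later, load the state dict back into a model with the same architecture; a minimal sketch (an assumed addition, not in the original):

alexnet_model.load_state_dict(torch.load(saveModelName))
alexnet_model.eval()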


Example #14
    def visualize_model(self, X_test):
        out = self.forward(X_test)
        return make_dot(out)  # return the Digraph so the caller can render or view it
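
A hedged usage sketch for the method above (object and data names are illustrative):

g = model.visualize_model(X_test)
g.format = 'png'
g.render('model_graph')  # writes model_graph (dot source) and model_graph.png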
Example #15
# -*- coding:utf-8 -*-
import torch
from torch.autograd import Variable
from models.network import PyramidHourglassNet
from visualize import make_dot

x = Variable(torch.randn(6, 3, 256, 256))
model = PyramidHourglassNet(nFeats=256, nModules=2, numOutput=16, nStack=2)
y = model(x)
# To check the flow direction: if the batch is laid out as (6, 3, 256, 256), use
# y[index], because y is a list and has no grad_fn; otherwise y can be passed directly.
g = make_dot(y[0])
# g = make_dot(y)  # would fail: y is a list and has no grad_fn (see the note above)
g.view()