Example #1
import megbrain
from megskull.graph import FpropEnv
from megskull.network import NetworkVisitor


def load_model(model_file, dev, use_input_rois=False):
    # `make_network` is assumed to be defined elsewhere in this project.
    network = make_network(1, 'test', model_file, use_input_rois)

    # Move every variable in the graph onto the target comp node (device).
    vis = NetworkVisitor(network.outputs)
    vis = vis.reset_comp_node(dev)

    env = FpropEnv()
    env.flags.enforce_var_shape = False
    megbrain.config.set_comp_graph_option(env.comp_graph,
                                          'log_static_mem_alloc', False)
    func = env.comp_graph.compile_outonly(
        [env.get_mgbvar(var) for var in vis.dest_vars])
    return func
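# A minimal usage sketch; the model path and the comp-node string below are
# hypothetical placeholders, and the compiled function is then called with
# the graph's inputs per your MegBrain setup.
func = load_model("./data/model.data", "gpu0")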
Example #2
def make_network(minibatch_size=128, debug=False):
    patch_size = 32
    inp = DataProvider("data",
                       shape=(minibatch_size, 3, patch_size, patch_size),
                       dtype=np.float32)
    label = DataProvider("label", shape=(minibatch_size, ), dtype=np.int32)

    lay = conv_bn(inp, 3, 1, 1, 16, True)

    n = 18
    lis = [16, 32, 64]
    for i in lis:
        lay = res_block(lay, i, n)

    # global average pooling
    # feature = lay.mean(axis=2).mean(axis=2)
    feature = Pooling2D("pooling", lay, window=8, stride=8, padding=0,
                        mode="AVERAGE")
    pred = Softmax("pred", FullyConnected(
        "fc0", feature, output_dim=10,
        nonlinearity=Identity()))

    network = Network(outputs=[pred])
    network.loss_var = CrossEntropyLoss(pred, label)

    if debug:
        visitor = NetworkVisitor(network.loss_var)
        for i in visitor.all_oprs:
            print(i)
            print(i.partial_shape)
            print("input = ", i.inputs)
            print("output = ", i.outputs)
            print()

    return network
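# A usage sketch: with n = 18 and three channel stages this is a CIFAR-style
# ResNet (depth 6n + 2 = 110 if res_block uses the standard two-conv residual
# block; conv_bn and res_block are assumed to be defined elsewhere).
net = make_network(minibatch_size=64, debug=True)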
Example #3
def make_network(minibatch_size=128):
    pre_net = load_network("rand.data")

    inp = pre_net.outputs[-1]
    # Freeze the pretrained parameters and locate the data provider.
    for i in NetworkVisitor(inp).all_oprs:
        # if isinstance(i, BN):
        #     i.set_freezed()
        if isinstance(i, ParamProvider):
            i.set_freezed()
        if isinstance(i, DataProvider):
            dp = i
    lay = O.ZeroGrad(inp)
    chl = inp.partial_shape[1]

    p = []
    num_blocks = 1
    for tt in range(num_blocks):
        for j in range(2):
            lay = deconv_bn_relu("encoder_deconv_{}{}".format(tt, j), lay,
                                 kernel_shape=3, stride=1, padding=0,
                                 output_nr_channel=chl)
        # lay = deconv_bn_relu("encoder_deconv_{}1".format(tt), lay, kernel_shape=3, stride=1, padding=1, output_nr_channel=chl)
        p.append(lay)
        # Upsample 2x between blocks, but not after the last one.
        if tt != num_blocks - 1:
            lay = deconv_bn_relu("encoder_deconv_{}2".format(tt), lay,
                                 kernel_shape=2, stride=2, padding=0,
                                 output_nr_channel=chl // 2)
        chl = chl // 2
    lay = deconv_bn_relu("outputs", lay, kernel_shape=3, stride=1, padding=1,
                         output_nr_channel=3, isbnrelu=False)
    # Crop the central 32x32 patch before computing the reconstruction loss.
    mid = lay.partial_shape[2] // 2
    lay = lay[:, :, mid - 16:mid + 16, mid - 16:mid + 16]
    print(lay.partial_shape)
    loss = ((lay - dp)**2).sum(axis=3).sum(axis=2).sum(axis=1).mean()
    network = Network(outputs=[lay, inp] + p)
    network.loss_var = loss
    return network
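# The center crop above keeps exactly 32x32 pixels: [mid-16 : mid+16] spans
# 32 indices. A quick numpy check (the 40x40 input shape is an illustrative
# assumption):
import numpy as np

t = np.zeros((1, 3, 40, 40))
m = t.shape[2] // 2
assert t[:, :, m - 16:m + 16, m - 16:m + 16].shape == (1, 3, 32, 32)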
Example #4
def make_network(minibatch_size=128):
    pre_net = load_network("trained.data")

    inp = pre_net.outputs[-1]
    # Freeze the pretrained network (BN statistics and parameters) and
    # locate the data provider.
    for i in NetworkVisitor(inp).all_oprs:
        if isinstance(i, BN):
            i.set_freezed()
        if isinstance(i, ParamProvider):
            i.set_freezed()
        if isinstance(i, DataProvider):
            dp = i
    lay = O.ZeroGrad(inp)
    chl = inp.partial_shape[1]

    p = []
    for tt in range(3):
        lay = deconv_bn_relu("encoder_deconv_{}0".format(tt), lay,
                             kernel_shape=3, stride=1, padding=1,
                             output_nr_channel=chl)
        lay = deconv_bn_relu("encoder_deconv_{}1".format(tt), lay,
                             kernel_shape=3, stride=1, padding=1,
                             output_nr_channel=chl)
        p.append(lay)
        # Upsample 2x between blocks, but not after the last one.
        if tt != 2:
            lay = deconv_bn_relu("encoder_deconv_{}2".format(tt), lay,
                                 kernel_shape=2, stride=2, padding=0,
                                 output_nr_channel=chl // 2)
        chl = chl // 2
    lay = deconv_bn_relu("outputs", lay, kernel_shape=3, stride=1, padding=1,
                         output_nr_channel=3, isbnrelu=False)
    loss = ((lay - dp)**2).sum(axis=3).sum(axis=2).sum(axis=1).mean()
    network = Network(outputs=[lay, inp] + p)
    network.loss_var = loss
    return network
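# The loss above is the per-sample sum of squared reconstruction errors,
# averaged over the batch: sum over W, H, C, then mean over N. A pure-numpy
# sketch of the same reduction (shapes are illustrative assumptions):
import numpy as np

lay_v = np.random.randn(2, 3, 32, 32)   # reconstructed batch (N, C, H, W)
dp_v = np.random.randn(2, 3, 32, 32)    # target batch
sse = ((lay_v - dp_v) ** 2).sum(axis=3).sum(axis=2).sum(axis=1)  # (N,)
assert np.isclose(sse.mean(),
                  ((lay_v - dp_v) ** 2).sum(axis=(1, 2, 3)).mean())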
Example #5
def make_network(minibatch_size=128, debug=False):
    patch_size = 32
    inp = DataProvider("data",
                       shape=(minibatch_size, 3, patch_size, patch_size),
                       dtype=np.float32)
    label = DataProvider("label", shape=(minibatch_size, ), dtype=np.int32)

    lay, w = conv_bn(inp, 3, 1, 1, 16, True)
    lis_w = [w]

    n = 3
    lis = [16, 32, 64]
    for i in lis:
        lay, lis_new = res_block(lay, i, n)
        lis_w += lis_new

    # global average pooling
    # feature = lay.mean(axis=2).mean(axis=2)
    feature = Pooling2D("pooling",
                        lay,
                        window=8,
                        stride=8,
                        padding=0,
                        mode="AVERAGE")
    pred = Softmax(
        "pred",
        FullyConnected(
            "fc0",
            feature,
            output_dim=10,
            # W = G(mean=0, std=(1 / 64)**0.5),
            # b = C(0),
            nonlinearity=Identity()))

    network = Network(outputs=[pred])
    network.loss_var = CrossEntropyLoss(pred, label)

    # Soft orthogonality regularizer: flatten each weight to (out, fan_in),
    # scale its columns, and push the Gram matrix toward the identity.
    lmd = 1
    for w in lis_w:
        w = w.reshape(w.partial_shape[0], -1).dimshuffle(1, 0)
        w = w / ((w**2).sum(axis=0)).dimshuffle('x', 0)
        A = O.MatMul(w.dimshuffle(1, 0), w)
        network.loss_var += lmd * (
            (A - np.identity(A.partial_shape[0]))**2).mean()

    if debug:
        visitor = NetworkVisitor(network.loss_var)
        for i in visitor.all_oprs:
            print(i)
            print(i.partial_shape)
            print("input = ", i.inputs)
            print("output = ", i.outputs)
            print()

    return network
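# The regularizer loop above flattens each weight to (out, fan_in), divides
# each column by its squared L2 norm (note: squared, not the norm itself),
# and penalizes the Gram matrix's distance from the identity. A pure-numpy
# sketch of one step (the 16x27 shape is an illustrative assumption):
import numpy as np

w_v = np.random.randn(16, 3 * 3 * 3)         # e.g. a 3x3 conv on 3 channels
w_v = w_v.reshape(w_v.shape[0], -1).T        # -> (fan_in, out)
w_v = w_v / (w_v ** 2).sum(axis=0)[None, :]  # scale columns as in the code
A = w_v.T @ w_v                              # (out, out) Gram matrix
penalty = ((A - np.identity(A.shape[0])) ** 2).mean()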
Example #6
def make_network(minibatch_size=128, debug=False):
    patch_size = 32
    inp = DataProvider("data",
                       shape=(minibatch_size, 3, patch_size, patch_size),
                       dtype=np.float32)
    label = DataProvider("label", shape=(minibatch_size, ), dtype=np.int32)

    lay = conv_bn(inp, 3, 1, 1, 16, True)

    n = 4
    lis = [16 * 4, 32 * 4, 64 * 4]
    se = None
    for i, c in enumerate(lis):
        lay, se = res_block(lay, c, i, n, se)

    # global average pooling
    feature = lay.mean(axis=2).mean(axis=2)
    # feature = Pooling2D("pooling", lay, window=8, stride=8, padding=0, mode="AVERAGE")
    # feature = O.Concat([feature, se], axis=1)
    pred = Softmax(
        "pred",
        FullyConnected(
            "fc0",
            feature,
            output_dim=10,
            # W = G(mean=0, std=(1 / 64)**0.5),
            # b = C(0),
            nonlinearity=Identity()))

    network = Network(outputs=[pred])
    network.loss_var = CrossEntropyLoss(pred, label)

    if debug:
        visitor = NetworkVisitor(network.loss_var)
        for i in visitor.all_oprs:
            print(i)
            print(i.partial_shape)
            print("input = ", i.inputs)
            print("output = ", i.outputs)
            print()

    return network
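# `lay.mean(axis=2).mean(axis=2)` is global average pooling: on 8x8 feature
# maps it matches an AVERAGE Pooling2D with window=8, stride=8. A quick
# numpy check (shapes are illustrative assumptions):
import numpy as np

x = np.random.randn(4, 64, 8, 8)             # (N, C, H, W)
assert np.allclose(x.mean(axis=2).mean(axis=2),
                   x.reshape(4, 64, -1).mean(axis=2))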
Example #7
from megskull.graph import FpropEnv
from megskull.graph import Function
from megskull.network import NetworkVisitor
from meghair.utils.io import load_network
import numpy as np
import cv2
# Presumably imported so that unpickling the network can resolve MyMomentum.
from train import MyMomentum

net = load_network(open("./data/dfconv.data", "rb"))
# test_func = Function().compile(net.outputs[0])

visitor = NetworkVisitor(net.loss_var)
offsets = []
locs = []
# Collect the offset operators ("conv2offsetx") and the Astype nodes, which
# presumably carry the sampled locations of the deformable convolutions.
for i in visitor.all_oprs:
    print(i, i.name)
    if "Astype" in i.name:
        locs.append(i)
    if i.name == "conv2offsetx":
        offsets.append(i)
        print("A")
print(len(locs))
locs = locs[::4]  # keep every fourth location operator
outs = [net.outputs[0]] + locs
test_func = Function().compile(outs)
outs1 = offsets
offs_func = Function().compile(outs1)
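# A hedged usage sketch: the compiled functions take the network's data
# inputs, but the exact feed signature depends on the megskull version, so
# the keyword below is an assumption.
# img = np.zeros((1, 3, 256, 256), dtype=np.float32)  # placeholder batch
# pred, *loc_maps = test_func(data=img)
# offset_maps = offs_func(data=img)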


def load_data(name):
    import pickle