Example #1
def validate(loader,
             name,
             device=0,
             num_epoch=100,
             patch_size=61,
             checkPath=None,
             pertubation=False):
    op = operation(device=device)
    # loss = chainer.functions.softmax_cross_entropy

    embedding_dim = patch_size // 2 + 1
    _model = model(device=device, embedding_dim=embedding_dim)

    if (checkPath is not None):
        param_dict = load_obj(checkPath)
        _model.param_from_dict(param_dict)

    with device_guard(device):

        start_time = timer()
        num = 0
        count = 0
        correct = 0
        l = 0
        epoch = 0

        if not pertubation:
            ld_fun = loader.get(epoch, pertube=False)
        else:
            ld_fun = loader.periodic_get(epoch)

        for x, y, index in ld_fun:
            x, y = op.array(x), op.array(y)

            x.cast(op.run.float32)

            x = _model(x)

            # L = wrapper(loss, device, x, y)

            correct = (op.argmax(x, axis=1) == y).sum() + correct
            # l = Variable(L) + l
            num = num + y.shape[0]
            # count = count + 1

        print("correct: ", correct / num, "time: ", timer() - start_time)
Example #2
def pts_generater(files):
    # work_dir = "/home/DATA/ADDITIONAL_STORAGE/ADD2/DATA/TRAININGSETS/"
    # work_dir = "/media/huge/DATA/TRAININGSETS/"
    # work_dir = "/home/polaris-nn/CODE/Polaris_v5/NN/NNWrapper/zoo/VINN/DataSet/"
    # mainDir = work_dir+"/"+name+"/"
    # targetDir = work_dir+"/"+name+"/COLLECT"
    # files = list(filter(lambda x: re.search("pkl", x), os.listdir(targetDir)))

    POSI, NEGE, IMG = [], [], []

    for index, file in enumerate(files):
        # path = targetDir+"/"+file
        path = file
        # print(path)
        _dict = load_obj(path)
        positive, negative, img = _dict['pos'], _dict['choice'], _dict['img_out']
        POSI.append(positive)
        NEGE.append(negative)
        IMG.append(img)
        # if(index==99):
        # break
    return POSI, NEGE, IMG
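
A short usage sketch for pts_generater; the directory below is a placeholder, and the file filter mirrors the commented-out lines inside the function.

import os
import re

target_dir = "./DataSet/COLLECT"          # placeholder directory of .pkl dumps
files = [os.path.join(target_dir, f)
         for f in os.listdir(target_dir)
         if re.search("pkl", f)]
POSI, NEGE, IMG = pts_generater(files)    # each .pkl must hold 'pos', 'choice', 'img_out'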
Example #3
def inference(img,
              pos,
              checkPath,
              device,
              design=None,
              patch_size=61,
              step=30,
              channels=4095):
    op = operation(device=device)

    preprocess = preprocessing(img,
                               channels=channels,
                               half_size=patch_size // 2,
                               device=device,
                               step=step)
    embedding = Embedding(patch_size, device=device)
    # channels_pooling = Channels_pooling(stride=channels_pooling_stride, device=device)

    embedding_dim = patch_size // 2 + 1
    _model = model(design=design, device=device, embedding_dim=embedding_dim)
    param_dict = load_obj(checkPath)
    # _model.param_from_dict(param_dict)
    with device_guard(device):
        for key, param in _model.namedparams():
            param.copydata(chainer.Parameter(cp.asarray(param_dict[key])))

    pos = op.array(pos)

    _x, pos, groups_start, groups_end = preprocess(pos)

    PRED = []

    with device_guard(device):

        for index in range(groups_start.shape[0]):
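            # Process one group of positions at a time: slice the precomputed
            # patches, finish preprocessing, embed, run the model, and keep
            # the argmax class index for each position in the group.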
            _end = default_timer()

            start = groups_start[index]
            end = groups_end[index]

            start, end = int(start.ndarray()), int(end.ndarray())

            x = _x[:, :, start:end]

            x = preprocess.beforeEmbedding(x)

            x = embedding(x)

            # x = x.transpose([1,0,2,3])
            # x = channels_pooling(x)
            # x = x.transpose([1,0,2,3])

            x.cast(cp.float32)

            x = _model(x)

            # x = _model(x)

            pred = op.argmax(x, axis=1)
            PRED.append(pred)

        PRED = op.concatenate(PRED, axis=0)

    return PRED, pos
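
A hedged sketch of how inference might be invoked; img, pos, and the checkpoint path are placeholders, and the remaining argument values simply echo the defaults above.

# Hypothetical usage: classify every candidate position in one image.
pred, pos_out = inference(img,                               # image array loaded elsewhere
                          pos,                               # candidate positions (array-like, assumed)
                          checkPath="./save/run0/99.pkl",    # placeholder checkpoint
                          device=0,
                          patch_size=61,
                          step=30,
                          channels=4095)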
Example #4
def train(X,
          Y,
          minibatch=1000,
          num_epoch=1000,
          patch_size=31,
          checkPath=None,
          savePath="./save/mix/feed/",
          device=1):
    op = VariableOperation(device=device)
    loss = softmax_cross_entropy
    optimizer = Adam(alpha=0.0002)

    embedding_dim = patch_size // 2 + 1
    # _model = model_fun(design=design, device=device, embedding_dim=embedding_dim)
    _model = model(design=design5, device=device, embedding_dim=embedding_dim)

    if (checkPath is not None):
        param_dict = load_obj(checkPath)
        with device_guard(device):
            for key, param in _model.namedparams():
                param.copydata(chainer.Parameter(cp.asarray(param_dict[key])))

    optimizer.setup(_model)

    with device_guard(device):

        for epoch in range(num_epoch):

            # if(epoch%10==0):
            # 	check = epoch/10
            # 	if(check%2==0):
            # 		print("L1, and only for pooling")
            # 		for key, l in _model.namednodes():
            # 			if bool(re.search("linear", key)):
            # 				l.disable_update()
            # 			else:
            # 				l.enable_update()

            # 		optimizer.add_hook(Lasso(rate=0.001), name='lasso')
            # 	else:
            # 		print("No regularization, and only for linear")
            # 		for key, l in _model.namednodes():
            # 			if bool(re.search("pool", key)):
            # 				l.disable_update()
            # 			else:
            # 				l.enable_update()

            # 		optimizer.remove_hook('lasso')

            start_time = timer()
            num = 0
            count = 0
            correct = 0
            l = 0

            indices = np.arange(0, len(X), minibatch)
            for start in indices:
                end = min(start + minibatch, len(X))

                x, y = X[start:end], Y[start:end]
                x, y = tensor(x, device=device), tensor(y, device=device)

                x.cast('float32')

                x = _model(x)

                L = wrapper(loss, device, x, y)

                _model.cleargrads()

                L.backward()

                optimizer.update()

                # print(type(x), type(y))
                # print(type(op.argmax(x, axis=1)), type(y))
                correct = (tensor(op.argmax(x, axis=1), device=device)
                           == y).sum() + correct
                l = Variable(L, device=device) + l
                num = num + y.shape[0]
                count = count + 1

            print("epoch: ", epoch, "num ", num, "correct: ", correct / num,
                  "loss: ", l / count, "time: ",
                  timer() - start_time)
Example #5
def train(model_fun,
          loader,
          device=0,
          num_epoch=100,
          patch_size=61,
          checkPath=None,
          pertubation=False,
          savePath=None):
    op = operation(device=device)

    loss = chainer.functions.softmax_cross_entropy
    optimizer = chainer.optimizers.Adam(alpha=0.0002)

    embedding_dim = patch_size // 2 + 1
    _model = model_fun(device=device, embedding_dim=embedding_dim)

    if (checkPath is not None):
        param_dict = load_obj(checkPath)
        # _model.param_from_dict(param_dict)
        with device_guard(device):
            for key, param in _model.namedparams():
                param.copydata(chainer.Parameter(cp.asarray(param_dict[key])))

    optimizer.setup(_model)

    with device_guard(device):

        for epoch in range(num_epoch):

            if (epoch % 10 == 0):
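                # Alternate two regimes every 10 epochs: in even phases the
                # linear layers are frozen and the pooling layers are trained
                # under an L1 (Lasso) penalty; in odd phases the penalty is
                # removed and only the linear layers are updated.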
                check = epoch // 10
                if (check % 2 == 0):
                    print("L1, and only for pooling")
                    for key, l in _model.namedlinks():
                        if bool(re.search("linear", key)):
                            # print(key, "disable_update")
                            l.disable_update()
                        else:
                            l.enable_update()

                    # optimizer.add_hook(chainer.optimizer.Lasso(rate=0.005), name='lasso')
                    # optimizer.add_hook(Lasso(rate=0.005), name='lasso')
                    optimizer.add_hook(Lasso(rate=0.001), name='lasso')
                else:
                    print("No regularization, and only for linear")
                    for key, l in _model.namedlinks():
                        if bool(re.search("pool", key)):
                            # print(key, "disable_update")
                            l.disable_update()
                        else:
                            l.enable_update()

                    optimizer.remove_hook('lasso')
                    # optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.005), name='ridge')

            start_time = timer()
            num = 0
            count = 0
            correct = 0
            l = 0
            if not pertubation:
                ld_fun = loader.get(epoch, pertube=False)
            else:
                ld_fun = loader.periodic_get(epoch)

            for x, y, index in ld_fun:
                x, y = op.array(x), op.array(y)

                x.cast(op.run.float32)

                x = _model(x)

                L = wrapper(loss, device, x, y)

                _model.cleargrads()

                L.backward()

                optimizer.update()

                correct = (op.argmax(x, axis=1) == y).sum() + correct
                l = Variable(L, device=device) + l
                num = num + y.shape[0]
                count = count + 1

                # param_dict = _model.param_to_dict()
                # for key in param_dict.keys():
                # 	print(key)

                # for name, params in _model.namedparams():
                # 	if name=="/pooling_chain/0/weights":
                # 		print(name, params)
                # 		print(type(params._data[0].get()))

            print("epoch: ", epoch, "num ", num, "correct: ", correct / num,
                  "loss: ", l / count, "time: ",
                  timer() - start_time)

            # param_dict = _model.param_to_dict()
            param_dict = {}
            for key, params in _model.namedparams():
                # if name=="/pooling_chain/0/weights":
                # print(name, params)
                # print(type(params._data[0].get()))
                param_dict[key] = params.array.get()

            if (savePath is not None):
                mkdir(savePath)
                save_obj(param_dict, savePath + "/{}.pkl".format(epoch))
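
A hedged sketch of a call into this trainer; pertube_loader and its arguments follow the (truncated) construction in Example #6, so further keyword arguments may be required, and model and the save path are placeholders standing in for model_fun.

ld = pertube_loader(POSI, NEGE, IMG, 75, size=61, step=25)   # assumed signature, truncated in Example #6
train(model,                     # model_fun: factory taking device= and embedding_dim=
      ld,
      device=0,
      num_epoch=100,
      patch_size=61,
      savePath="./save/run0/")   # parameters are pickled here once per epoch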
Example #6
    # print("X.shape: ", X.shape, "Y.shape: ", Y.shape)

    # np.save("./log/X.npy", X)
    # np.save("./log/Y.npy", Y)

    # end = timer()
    # X, Y = np.load("./log/X.npy"), np.load("./log/Y.npy")
    # print(timer()-end)

    # minibatch = 44
    # ld = loader(X, Y, minibatch, shuffle=True)

    # train(ld, device=1, num_epoch=100, size=61)
    # train_multi(ld, num_epoch=10)

    POSI = load_obj("./log/POSI.pkl")
    NEGE = load_obj("./log/NEGE.pkl")
    IMG = load_obj("./log/IMG.pkl")

    POSI = POSI[:10]
    NEGE = NEGE[:10]
    IMG = IMG[:10]

    minibatch = 25 * 3
    size = 31
    ld = pertube_loader(POSI,
                        NEGE,
                        IMG,
                        minibatch,
                        size=size,
                        step=25,
Example #7
def compare(d1, d2):
    # Collect, for each parameter key, whether the two saved dicts match exactly.
    _bool = []
    _key = []
    for key in d1.keys():
        _bool.append((d1[key] == d2[key]).all())
        _key.append(key)
    _bool = np.array(_bool).astype(np.bool_)
    # print(_bool)
    return [_key[x] for x in np.where(~_bool)[0]]


if __name__ == '__main__':

    # compare(d1, d2)
    for i in range(9):
        for j in range(i + 1, 9):
            d1 = load_obj("{}.pkl".format(i))
            d2 = load_obj("{}.pkl".format(j))
            # print(compare(d1, d2))
            result = compare(d1, d2)
            linear_check = any(re.search("linear", res) for res in result)
            pool_check = any(re.search("pool", res) for res in result)
            print("linear: ", linear_check, "pool: ", pool_check)

    print("#" * 100)