Example #1
def main():
    try:
        # Set up the argument parser
        parser = ArgumentParser(description="Wide residual network")

        parser.add_argument("--port", default=9999, help="listen port")
        parser.add_argument("--use_cpu",
                            action="store_true",
                            help="If set, load models onto CPU devices")
        parser.add_argument("--parameter_file", default="wrn-50-2.pickle")
        parser.add_argument("--model",
                            choices=['resnet', 'wrn', 'preact', 'addbn'],
                            default='wrn')
        parser.add_argument("--depth",
                            type=int,
                            choices=[18, 34, 50, 101, 152, 200],
                            default=50)

        # Process arguments
        args = parser.parse_args()
        port = args.port

        # start the agent
        agent = Agent(port)

        net = model.create_net(args.model, args.depth, args.use_cpu)
        if args.use_cpu:
            print('Using CPU')
            dev = device.get_default_device()
        else:
            print('Using GPU')
            dev = device.create_cuda_gpu()
            net.to_device(dev)
        model.init_params(net, args.parameter_file)
        print('Finished loading model')

        labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
        serve(net, labels, dev, agent)

        # acc = evaluate(net, '../val_list.txt',  'image/val', dev)
        # print(acc)

        # wait for the agent to finish handling HTTP requests
        agent.stop()
    except SystemExit:
        return
    except:
        traceback.print_exc()
        sys.stderr.write("  for help use --help \n\n")
        return 2
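main() returns 2 on failure, so scripts like this are normally wired to the shell through sys.exit; a minimal entry-point sketch (omitted from the snippet itself):

if __name__ == '__main__':
    sys.exit(main())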
Example #2
def main():
    try:
        # Set up the argument parser
        parser = ArgumentParser(description="VGG inference")

        parser.add_argument("--port", default=9999, help="listen port")
        parser.add_argument("--use_cpu",
                            action="store_true",
                            help="If set, load models onto CPU devices")
        parser.add_argument("--parameter_file", default="")
        parser.add_argument("--depth",
                            type=int,
                            choices=[11, 13, 16, 19],
                            default=11)
        parser.add_argument("--batchnorm",
                            action='store_true',
                            help='use batchnorm or not')

        # Process arguments
        args = parser.parse_args()
        port = args.port

        # start the agent
        agent = Agent(port)

        net = model.create_net(args.depth, 1000, args.batchnorm, args.use_cpu)
        if args.use_cpu:
            print('Using CPU')
            dev = device.get_default_device()
        else:
            print('Using GPU')
            dev = device.create_cuda_gpu()
            net.to_device(dev)
        model.init_params(net, args.parameter_file)
        print('Finished loading model')

        labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
        serve(net, labels, dev, agent)

        # acc = evaluate(net, '../val_list.txt',  'image/val', dev)
        # print(acc)

        # wait for the agent to finish handling HTTP requests
        agent.stop()
    except SystemExit:
        return
    except:
        traceback.print_exc()
        sys.stderr.write("  for help use --help \n\n")
        return 2
Example #3
def main():
    try:
        # Set up the argument parser
        parser = ArgumentParser(description="Wide residual network")

        parser.add_argument("--port", default=9999, help="listen port")
        parser.add_argument("--use_cpu", action="store_true",
                            help="If set, load models onto CPU devices")
        parser.add_argument("--parameter_file", default="wrn-50-2.pickle")
        parser.add_argument("--model", choices=['resnet', 'wrn', 'preact',
                                                'addbn'], default='wrn')
        parser.add_argument("--depth", type=int, choices=[18, 34, 50, 101,
                                                          152, 200],
                            default=50)

        # Process arguments
        args = parser.parse_args()
        port = args.port

        # start the agent
        agent = Agent(port)

        net = model.create_net(args.model, args.depth, args.use_cpu)
        if args.use_cpu:
            print('Using CPU')
            dev = device.get_default_device()
        else:
            print('Using GPU')
            dev = device.create_cuda_gpu()
            net.to_device(dev)
        model.init_params(net, args.parameter_file)
        print('Finished loading model')

        labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
        serve(net, labels, dev, agent)

        # acc = evaluate(net, '../val_list.txt',  'image/val', dev)
        # print(acc)

        # wait for the agent to finish handling HTTP requests
        agent.stop()
    except SystemExit:
        return
    except:
        traceback.print_exc()
        sys.stderr.write("  for help use --help \n\n")
        return 2
Example #4
def main():
    try:
        # Set up the argument parser
        parser = ArgumentParser(description='DenseNet inference')

        parser.add_argument("--port", default=9999, help="listen port")
        parser.add_argument("--use_cpu",
                            action="store_true",
                            help="If set, load models onto CPU devices")
        parser.add_argument("--parameter_file", default="densenet-121.pickle")
        parser.add_argument("--depth",
                            type=int,
                            choices=[121, 169, 201, 161],
                            default=121)

        parser.add_argument('--nb_classes', default=1000, type=int)

        # Process arguments
        args = parser.parse_args()
        port = args.port

        # start the agent
        agent = Agent(port)

        net = model.create_net(args.depth, args.nb_classes, 0, args.use_cpu)
        if args.use_cpu:
            print('Using CPU')
            dev = device.get_default_device()
        else:
            print('Using GPU')
            dev = device.create_cuda_gpu()
            net.to_device(dev)
        print('start to load parameter_file')
        model.init_params(net, args.parameter_file)
        print('Finished loading model')

        labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
        serve(net, labels, dev, agent)
        # wait for the agent to finish handling HTTP requests
        agent.stop()

    except SystemExit:
        return
    except:
        traceback.print_exc()
        sys.stderr.write("  for help use --help \n\n")
        return 2
Example #5
def main():
    try:
        # Set up the argument parser
        parser = ArgumentParser(description="VGG inference")

        parser.add_argument("--port", default=9999, help="listen port")
        parser.add_argument("--use_cpu", action="store_true",
                            help="If set, load models onto CPU devices")
        parser.add_argument("--parameter_file", default="")
        parser.add_argument("--depth", type=int, choices=[11, 13, 16, 19],
                            default=11)
        parser.add_argument("--batchnorm", action='store_true',
                            help='use batchnorm or not')

        # Process arguments
        args = parser.parse_args()
        port = args.port

        # start the agent
        agent = Agent(port)

        net = model.create_net(args.depth, 1000, args.batchnorm, args.use_cpu)
        if args.use_cpu:
            print('Using CPU')
            dev = device.get_default_device()
        else:
            print('Using GPU')
            dev = device.create_cuda_gpu()
            net.to_device(dev)
        model.init_params(net, args.parameter_file)
        print('Finished loading model')

        labels = np.loadtxt('synset_words.txt', str, delimiter='\t ')
        serve(net, labels, dev, agent)

        # acc = evaluate(net, '../val_list.txt',  'image/val', dev)
        # print(acc)

        # wait for the agent to finish handling HTTP requests
        agent.stop()
    except SystemExit:
        return
    except:
        traceback.print_exc()
        sys.stderr.write("  for help use --help \n\n")
        return 2
Example #6
def predict():
    # Load parameters from the JSON file
    # Bind frequently used parameters to local variables
    params = utils.load_params()
    optim_params = params["optim_params"]

    # Use the GPU if available, otherwise fall back to the CPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("使用デバイス:", device)

    test_dataset = ArrangeNumDataset(params=params, phase="test")
    test_loader = DataLoader(test_dataset,
                             batch_size=params["batch_size"],
                             shuffle=False,
                             num_workers=4)

    net = create_net(params)

    ys = []
    ypreds = []
    for i in range(params["num_estimate"]):
        print("\n推論:{}/{}".format(i + 1, params["num_estimate"]))
        # ネットワークに重みをロードする
        weight_path = os.path.join("result", params["name"],
                                   "weight/weight" + str(i) + ".pth")
        load_weight = torch.load(weight_path, map_location=device)
        net.load_state_dict(load_weight)
        print("ネットワークに重みをロードしました")
        print("-----推論中-----")

        # Run inference
        y, ypred = eval_net(net, test_loader, probability=True, device=device)

        ys.append(y.cpu().numpy())
        ypreds.append(ypred.cpu().numpy())
        # print(confusion_matrix(ys[-1], ypreds[-1]))
        # print(classification_report(ys[-1], ypreds[-1], target_names=params["labels"],
        #                             digits=3, zero_division=0))

    utils.print_result(params, ys, ypreds)
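ypreds collects one probability array per run before utils.print_result aggregates them; a minimal sketch of ensembling the collected runs by hand, assuming each entry is an (N, num_classes) probability array (which matches probability=True above):

import numpy as np

mean_prob = np.mean(ypreds, axis=0)        # average the probabilities over the num_estimate runs
final_pred = np.argmax(mean_prob, axis=1)  # ensemble class prediction per sample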
Example #7
    else:
        suffix = pname[p2 + 1:]
    return pname[p1+1:p2] + '.' + suffix


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
                            'dict.')
    parser.add_argument("depth", type=int, choices=[11, 13, 16, 19])
    parser.add_argument("outfile")
    parser.add_argument("--batchnorm", action='store_true',
                        help='use batchnorm or not')

    args = parser.parse_args()

    net = model.create_net(args.depth, 1000, args.batchnorm)
    url = 'vgg%d' % args.depth
    if args.batchnorm:
        url += '_bn'
    torch_dict = model_zoo.load_url(model_urls[url])
    params = {'SINGA_VERSION': 1101}
    # params = net.param_values()
    for pname, pval in zip(net.param_names(), net.param_values()):
        torch_name = rename(pname)
        if torch_name in torch_dict:
            ary = torch_dict[torch_name].numpy()
            ary = np.array(ary, dtype=np.float32)
            if len(ary.shape) == 4:
                params[pname] = np.reshape(ary, (ary.shape[0], -1))
            else:
                params[pname] = np.transpose(ary)
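The snippet is cut off before params is persisted; Examples #14 and #16 below save their converted dict with pickle, so the missing tail here is presumably the same pattern, targeting the outfile argument defined above:

import pickle

with open(args.outfile, 'wb') as fd:
    pickle.dump(params, fd)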
Example #8
def serve(agent, use_cpu, parameter_file, topk=5):
    if use_cpu:
        print('running with cpu')
        dev = device.get_default_device()
        layer.engine = 'singacpp'
    else:
        print("runing with gpu")
        dev = device.create_cuda_gpu()

    print('Start initialization............')
    net, _ = model.create_net(is_training=False)
    net.load(parameter_file, use_pickle=True)
    net.to_device(dev)
    print('End initialization............')

    labels = np.loadtxt('synset_words.txt', str, delimiter='\t').tolist()
    labels.insert(0, 'empty background')
    while True:
        key, val = agent.pull()
        if key is None:
            time.sleep(0.1)
            continue
        msg_type = MsgType.parse(key)
        if msg_type.is_request():
            try:
                response = ""
                ratio = 0.875
                img = image_tool.load_img(val['image'])
                # PIL's Image.size is (width, height)
                width, height = img.size
                print(img.size)
                crop_h, crop_w = int(height * ratio), int(width * ratio)
                img = np.array(
                    image_tool.crop(img, (crop_h, crop_w), 'center').resize(
                        (299, 299))).astype(np.float32) / float(255)
                img -= 0.5
                img *= 2
                # img[:,:,[0,1,2]] = img[:,:,[2,1,0]]
                img = img.transpose((2, 0, 1))
                images = np.expand_dims(img, axis=0)
                x = tensor.from_numpy(images.astype(np.float32))
                x.to_device(dev)
                y = net.predict(x)
                prob = np.average(tensor.to_numpy(y), 0)
                # sort and reverse
                idx = np.argsort(-prob)[0:topk]
                for i in idx:
                    response += "%s:%s<br/>" % (labels[i], prob[i])
            except:
                traceback.print_exc()
                response = "Sorry, system error during prediction."
            agent.push(MsgType.kResponse, response)
        elif MsgType.kCommandStop.equal(msg_type):
            print('get stop command')
            agent.push(MsgType.kStatus, "success")
            break
        else:
            print('get unsupported message %s' % str(msg_type))
            agent.push(MsgType.kStatus, "Unknown command")
            break
        # while loop
    print("server stop")
Example #9
def train(inputfolder,
          outputfolder,
          visfolder,
          trainratio,
          validationratio,
          testratio,
          dev,
          agent,
          max_epoch,
          use_cpu,
          batch_size=100):
    opt = optimizer.SGD(momentum=0.9, weight_decay=0.01)
    agent.push(MsgType.kStatus, 'Downloading data...')
    # all_feature, all_label = get_data(os.path.join(inputfolder, 'features.txt'), os.path.join(inputfolder, 'label.txt'))  # PUT THE DATA on/to dbsystem
    all_feature, all_label = get_data(
        os.path.join(inputfolder, 'features.txt'),
        os.path.join(inputfolder, 'label.txt'))  # PUT THE DATA on/to dbsystem
    agent.push(MsgType.kStatus, 'Finish downloading data')
    n_folds = 5
    print("all_label shape: ", all_label.shape)
    all_label = all_label[:, 1]
    # for i, (train_index, test_index) in enumerate(StratifiedKFold(all_label.reshape(all_label.shape[0]), n_folds=n_folds)):
    for i in range(3):
        train_index = np.arange(0, 1404)
        train_feature, train_label = all_feature[train_index], all_label[
            train_index]
        if i == 0:
            print("fold: ", i)
            break
    print("train label sum: ", train_label.sum())
    in_shape = np.array([1, 12, 375])
    trainx = tensor.Tensor(
        (batch_size, int(in_shape[0]), int(in_shape[1]), int(in_shape[2])),
        dev)
    trainy = tensor.Tensor((batch_size, ), dev, tensor.int32)
    num_train_batch = train_feature.shape[0] // batch_size
    idx = np.arange(train_feature.shape[0], dtype=np.int32)

    # hyperpara = [height, width, kernel_y, kernel_x, stride_y, stride_x]
    hyperpara = np.array([12, 375, 3, 10, 1, 3])
    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara
    print('kernel_y: ', kernel_y)
    print('kernel_x: ', kernel_x)
    print('stride_y: ', stride_y)
    print('stride_x: ', stride_x)
    net = model.create_net(in_shape, hyperpara, use_cpu)
    net.to_device(dev)

    test_epoch = 10
    occlude_test_epoch = 100
    for epoch in range(max_epoch):
        if handle_cmd(agent):
            break
        np.random.seed(10)
        np.random.shuffle(idx)
        train_feature, train_label = train_feature[idx], train_label[idx]
        print('Epoch %d' % epoch)

        loss, acc = 0.0, 0.0
        val_loss, val_acc = 0.0, 0.0  # using the first half as validation
        for b in range(int(num_train_batch)):
            x = train_feature[b * batch_size:(b + 1) * batch_size]
            y = train_label[b * batch_size:(b + 1) * batch_size]
            x = x.reshape((batch_size, in_shape[0], in_shape[1], in_shape[2]))
            trainx.copy_from_numpy(x)
            trainy.copy_from_numpy(y)
            grads, (l, a), probs = net.train(trainx, trainy)
            loss += l
            acc += a
            if b < (int(num_train_batch / 2)):
                val_loss += l
                val_acc += a
            for (s, p, g) in zip(net.param_specs(), net.param_values(), grads):
                opt.apply_with_lr(epoch, 0.005, g, p, str(s.name))
            info = 'training loss = %f, training accuracy = %f' % (l, a)
            utils.update_progress(b * 1.0 / num_train_batch, info)
        # put training status info into a shared queue
        info = dict(phase='train',
                    step=epoch,
                    accuracy=acc / num_train_batch,
                    loss=loss / num_train_batch,
                    timestamp=time.time())
        agent.push(MsgType.kInfoMetric, info)
        info = 'training loss = %f, training accuracy = %f' \
            % (loss / num_train_batch, acc / num_train_batch)
        print(info)
        val_info = 'validation loss = %f, validation accuracy = %f' \
           % (val_loss / (int(num_train_batch / 2)), val_acc / (int(num_train_batch / 2)))
        print(val_info)
        if epoch == (max_epoch - 1):
            print('final val_loss: ', val_loss / (int(num_train_batch / 2)))
            np.savetxt(outputfolder + '/final_results.txt',
                       np.full((1), val_loss / (int(num_train_batch / 2))),
                       delimiter=",")
Example #10
                              shuffle=True,
                              num_workers=4)
    test_loader = DataLoader(test_dataset,
                             batch_size=params["batch_size"],
                             shuffle=False,
                             num_workers=4)

    # Decide whether to apply class weights to the loss function
    if params["imbalance"] == "lossweight":
        loss_weight = train_dataset.weight.to(device)  # fails unless moved to the device
        print("lossweight:", loss_weight.cpu())
    else:
        loss_weight = None
    loss_fn = torch.nn.CrossEntropyLoss(weight=loss_weight)

    net = create_net(params)
    if params["net_params"]["weight_path"]:
        weight_path = os.path.join(params["net_params"]["weight_path"],
                                   "weight" + str(i) + ".pth")
        load_weight = torch.load(weight_path, map_location=device)
        net.load_state_dict(load_weight)
        print('Loaded weights')

    # Configure the optimizer
    if "Adam" == optim_params["name"]:
        optimizer = optim.Adam(net.get_params_lr(
            lr_not_pretrained=optim_params["lr_not_pretrained"],
            lr_pretrained=optim_params["lr_pretrained"]),
                               weight_decay=optim_params["weight_decay"])
    elif "SGD" == optim_params["name"]:
        optimizer = optim.SGD(net.get_params_lr(
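The example breaks off mid-call; by symmetry with the Adam branch above, the SGD branch plausibly continues along these lines (a sketch only; the momentum value is a guess, not from the source):

        optimizer = optim.SGD(net.get_params_lr(
            lr_not_pretrained=optim_params["lr_not_pretrained"],
            lr_pretrained=optim_params["lr_pretrained"]),
                              momentum=0.9,
                              weight_decay=optim_params["weight_decay"])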
Example #11
        suffix = 'running_var'
    else:
        suffix = pname[p2 + 1:]
    return pname[p1+1:p2] + '.' + suffix


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
                            'dict.')
    parser.add_argument("depth", type=int, choices=[121, 169, 201, 161])
    parser.add_argument("outfile")
    parser.add_argument('nb_classes', nargs='?', default=1000, type=int)

    args = parser.parse_args()

    net = model.create_net(args.depth, args.nb_classes)
    url = 'densenet%d' % args.depth
    torch_dict = model_zoo.load_url(model_urls[url])
    params = {'SINGA_VERSION': 1101}

    # resolve dict keys name mismatch problem
    print(len(net.param_names()), len(torch_dict.keys()))
    for pname, pval, torch_name in\
        zip(net.param_names(), net.param_values(), torch_dict.keys()):
        #torch_name = rename(pname)
        ary = torch_dict[torch_name].numpy()
        ary = np.array(ary, dtype=np.float32)
        if len(ary.shape) == 4:
            params[pname] = np.reshape(ary, (ary.shape[0], -1))
        else:
            params[pname] = np.transpose(ary)
Example #12
        suffix = pname[p2 + 1:]
    return pname[p1 + 1:p2] + '.' + suffix


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
                            'dict.')
    parser.add_argument("depth", type=int, choices=[11, 13, 16, 19])
    parser.add_argument("outfile")
    parser.add_argument("--batchnorm",
                        action='store_true',
                        help='use batchnorm or not')

    args = parser.parse_args()

    net = model.create_net(args.depth, 1000, args.batchnorm)
    url = 'vgg%d' % args.depth
    if args.batchnorm:
        url += '_bn'
    torch_dict = model_zoo.load_url(model_urls[url])
    params = {'SINGA_VERSION': 1101}
    # params = net.param_values()
    for pname, pval in zip(net.param_names(), net.param_values()):
        torch_name = rename(pname)
        if torch_name in torch_dict:
            ary = torch_dict[torch_name].numpy()
            ary = np.array(ary, dtype=np.float32)
            if len(ary.shape) == 4:
                params[pname] = np.reshape(ary, (ary.shape[0], -1))
            else:
                params[pname] = np.transpose(ary)
Example #13
        suffix = 'running_var'
    else:
        suffix = pname[p2 + 1:]
    return pname[p1 + 1:p2] + '.' + suffix


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
                            'dict.')
    parser.add_argument("depth", type=int, choices=[121, 169, 201, 161])
    parser.add_argument("outfile")
    parser.add_argument('nb_classes', nargs='?', default=1000, type=int)

    args = parser.parse_args()

    net = model.create_net(args.depth, args.nb_classes)
    url = 'densenet%d' % args.depth
    torch_dict = model_zoo.load_url(model_urls[url])
    params = {'SINGA_VERSION': 1101}

    # resolve dict keys name mismatch problem
    print(len(net.param_names()), len(torch_dict.keys()))
    for pname, pval, torch_name in\
        zip(net.param_names(), net.param_values(), torch_dict.keys()):
        #torch_name = rename(pname)
        ary = torch_dict[torch_name].numpy()
        ary = np.array(ary, dtype=np.float32)
        if len(ary.shape) == 4:
            params[pname] = np.reshape(ary, (ary.shape[0], -1))
        else:
            params[pname] = np.transpose(ary)
Example #14
    elif 'Linear' in module_type:
        idx = linear(m, idx, params, param_names)
    return idx


if __name__ == '__main__':
    parser = ArgumentParser(
        description='Convert params from torch to python '
        'dict. \n resnet could have depth of 18, 34, 101, 152; \n '
        'wrn has depth 50; preact has depth 200; addbn has depth 50')
    parser.add_argument("infile", help="torch checkpoint file")
    parser.add_argument("model", choices=['resnet', 'wrn', 'preact', 'addbn'])
    parser.add_argument("depth", type=int, choices=[18, 34, 50, 101, 152, 200])
    args = parser.parse_args()

    net = model.create_net(args.model, args.depth)
    # model.init_params(net)
    m = torchfile.load(args.infile)
    params = {}
    # params = net.param_values()
    param_names = net.param_names()
    traverse(m, 0, params, param_names)
    miss = [name for name in param_names if name not in params]
    if len(miss) > 0:
        print('The following params are missing from torch file')
        print(miss)

    outfile = os.path.splitext(args.infile)[0] + '.pickle'
    with open(outfile, 'wb') as fd:
        pickle.dump(params, fd)
Example #15
def train(inputfolder,
          outputfolder,
          visfolder,
          sampleid,
          dev,
          agent,
          max_epoch,
          use_cpu,
          batch_size=100):
    opt = optimizer.SGD(momentum=0.8, weight_decay=0.01)
    agent.push(MsgType.kStatus, 'Downloading data...')
    # all_feature, all_label = get_data(os.path.join(inputfolder, 'features_inference.txt'), os.path.join(inputfolder, 'label_inference.txt'))  # PUT THE DATA on/to dbsystem
    all_feature, all_label = get_data(
        os.path.join(inputfolder, 'features_inference.txt'),
        os.path.join(inputfolder,
                     'label_inference.txt'))  # PUT THE DATA on/to dbsystem
    agent.push(MsgType.kStatus, 'Finish downloading data')
    n_folds = 5
    all_label = all_label[:, 1]
    for i, (train_index, test_index) in enumerate(
            StratifiedKFold(all_label.reshape(all_label.shape[0]),
                            n_folds=n_folds)):
        test_index = np.arange(0, 351)
        test_feature, test_label = all_feature[test_index], all_label[
            test_index]
        if i == 0:
            print("fold: ", i)
            break
    print("test label sum: ", test_label.sum())
    in_shape = np.array([1, 12, 375])
    testx = tensor.Tensor(
        (test_feature.shape[0], in_shape[0], in_shape[1], in_shape[2]), dev)
    testy = tensor.Tensor((test_feature.shape[0], ), dev, core_pb2.kInt)
    # num_test_batch = test_x.shape[0] / (batch_size)

    # hyperpara = [height, width, kernel_y, kernel_x, stride_y, stride_x]
    hyperpara = np.array([12, 375, 3, 10, 1, 3])
    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara
    net = model.create_net(in_shape, hyperpara, use_cpu)
    print "checkpoint path: ", os.path.join(
        os.path.join(os.environ.get('GEMINI_HOME'),
                     'model/readmission_CNN_code/'), 'parameter_last240')
    net.load(
        os.path.join(
            os.path.join(os.environ.get('GEMINI_HOME'),
                         'model/readmission_CNN_code/'), 'parameter_last240'),
        20)
    net.to_device(dev)
    for name in net.param_names():
        print("init names: ", name)

    test_epoch = 10
    occlude_test_epoch = 100
    for epoch in range(max_epoch):
        if handle_cmd(agent):
            break
        np.random.seed(10)
        print('Epoch %d' % epoch)

        # loss, acc = 0.0, 0.0
        # for b in range(num_train_batch):
        #     x, y = train_feature[b * batch_size:(b + 1) * batch_size], train_label[b * batch_size:(b + 1) * batch_size]
        #    x = x.reshape((batch_size, in_shape[0], in_shape[1], in_shape[2]))
        #     trainx.copy_from_numpy(x)
        #    trainy.copy_from_numpy(y)
        #     grads, (l, a), probs = net.train(trainx, trainy)
        #     loss += l
        #    acc += a
        #     for (s, p, g) in zip(net.param_specs(),
        #                          net.param_values(), grads):
        #         opt.apply_with_lr(epoch, get_lr(epoch), g, p, str(s.name))
        #     info = 'training loss = %f, training accuracy = %f' % (l, a)
        #     utils.update_progress(b * 1.0 / num_train_batch, info)
        # put training status info into a shared queue
        # info = dict(phase='train', step=epoch,
        #             accuracy=acc/num_train_batch,
        #             loss=loss/num_train_batch,
        #             timestamp=time.time())
        # agent.push(MsgType.kInfoMetric, info)
        # info = 'training loss = %f, training accuracy = %f' \
        #     % (loss / num_train_batch, acc / num_train_batch)
        # print info

        if epoch % test_epoch == 0 or epoch == (max_epoch - 1):
            loss, acc = 0.0, 0.0
            x, y = np.copy(test_feature), np.copy(test_label)
            x = x.reshape((x.shape[0], in_shape[0], in_shape[1], in_shape[2]))
            testx.copy_from_numpy(x)
            testy.copy_from_numpy(y)
            l, a, probs = net.evaluate(testx, testy)
            loss += l
            acc += a
            print('testing loss = %f, accuracy = %f' % (loss, acc))
            # put test status info into a shared queue
            info = dict(phase='test',
                        step=epoch,
                        accuracy=acc,
                        loss=loss,
                        timestamp=time.time())
            agent.push(MsgType.kInfoMetric, info)
            print('self calculate test auc = %f' % auroc(
                softmax(tensor.to_numpy(probs))[:, 1].reshape(-1, 1),
                y.reshape(-1, 1)))
            print('self calculate test accuracy = %f' % cal_accuracy(
                softmax(tensor.to_numpy(probs))[:, 1].reshape(-1, 1),
                y.reshape(-1, 1)))
            cnn_metric_dict = {}  # for output to json
            cnn_metric_dict['Number of Samples: '] = y.shape[0]
            cnn_sensitivity, cnn_specificity, cnn_harmonic = HealthcareMetrics(
                softmax(tensor.to_numpy(probs))[:, 1].reshape(-1, 1),
                y.reshape(-1, 1), 0.25)
            cnn_metric_dict['AUC: '] = auroc(
                softmax(tensor.to_numpy(probs))[:, 1].reshape(-1, 1),
                y.reshape(-1, 1))
            # cnn_metric_dict['accuracy: '] = cal_accuracy(softmax(tensor.to_numpy(probs))[:,1].reshape(-1, 1), y.reshape(-1, 1))
            cnn_metric_dict['Sensitivity: '] = cnn_sensitivity
            cnn_metric_dict['Specificity: '] = cnn_specificity

            try:
                with open(os.path.join(visfolder, 'cnn_metric_info.json'),
                          'w') as cnn_metric_info_writer:
                    # json.dump(cnn_metric_dict, cnn_metric_info_writer)
                    cnn_metric_info_writer.write('[')
                    cnn_metric_info_writer.write(
                        '\"Number of Patients: %d\", ' % (y.shape[0]))
                    cnn_metric_info_writer.write('\"AUC: %s\", ' % (str(
                        int(100 * round((auroc(
                            softmax(tensor.to_numpy(probs))[:, 1].reshape(
                                -1, 1), y.reshape(-1, 1))), 2))) + '%'))
                    cnn_metric_info_writer.write(
                        '\"Sensitivity: %s\", ' %
                        (str(int(100 * round(cnn_sensitivity, 2))) + '%'))
                    cnn_metric_info_writer.write(
                        '\"Specificity: %s\" ' %
                        (str(int(100 * round(cnn_specificity, 2))) + '%'))
                    cnn_metric_info_writer.write(']')
            except Exception as e:
                os.remove(os.path.join(visfolder, 'cnn_metric_info.json'))
                print('output cnn_metric_info.json failed: ', e)
            if epoch == (max_epoch - 1):
                np.savetxt(os.path.join(
                    os.path.join(os.environ.get('GEMINI_HOME'),
                                 'model/readmission_CNN_code/'),
                    'readmitted_prob.csv'),
                           softmax(tensor.to_numpy(probs))[:, 1],
                           fmt='%6f',
                           delimiter=",")

        # if epoch == (max_epoch-1):
        if epoch == (max_epoch):  # never true for epoch in range(max_epoch): the occlusion test below is disabled
            print("occlude test")
            # occlude test data
            height_dim = (height - kernel_y) // stride_y + 1
            width_dim = (width - kernel_x) // stride_x + 1
            meta_data = np.array([
                height_dim, height, kernel_y, stride_y, width_dim, width,
                kernel_x, stride_x
            ])
            np.savetxt(os.path.join(outputfolder, 'meta_data.csv'),
                       meta_data,
                       fmt='%6f',
                       delimiter=",")  #modify here
            true_label_prob_matrix = np.zeros([(height_dim * width_dim), 1])
            for height_idx in range(height_dim):
                for width_idx in range(width_dim):
                    occlude_test_feature, occlude_test_label = get_occlude_data(np.copy(test_feature), np.copy(test_label), \
                    height, width, height_idx, width_idx, kernel_y, kernel_x, stride_y, stride_x)
                    loss, acc = 0.0, 0.0
                    x, y = occlude_test_feature, occlude_test_label  # !!! where are the labels?
                    x = x.reshape(
                        (x.shape[0], in_shape[0], in_shape[1], in_shape[2]))
                    testx.copy_from_numpy(x)
                    testy.copy_from_numpy(y)
                    l, a, probs = net.evaluate(testx, testy)
                    y_scores = softmax(tensor.to_numpy(probs))[:, 1]
                    sum_true_label_prob = 0.0
                    for i in range(x.shape[0]):  # y_scores holds the probability of class 1
                        if y[i] == 1:
                            sum_true_label_prob += y_scores[i]
                        elif y[i] == 0:
                            sum_true_label_prob += 1 - y_scores[i]
                    true_label_prob_matrix[
                        height_idx * width_dim + width_idx,
                        0] = sum_true_label_prob / x.shape[0]
            print "occlude x shape: ", x.shape
            np.savetxt(os.path.join(
                os.path.join(os.environ.get('GEMINI_HOME'),
                             'model/readmission_CNN_code/'),
                'true_label_prob_matrix.csv'),
                       true_label_prob_matrix,
                       fmt='%6f',
                       delimiter=",")  #modify here
    #for (s, p) in zip(net.param_specs(), net.param_values()):
    #    print "last epoch param name: ", s
    #    print "last epoch param value: ", p.l2()
    # net.save('parameter_last')
    print "begin explain"
    # explain_occlude_area(np.copy(test_feature), np.copy(test_label), os.path.join(outputfolder,'readmitted_prob.csv'), os.path.join(outputfolder,'true_label_prob_matrix.csv'), os.path.join(outputfolder,'meta_data.csv'), top_n = 20)
    print "begin explain format out"
    top_n = 30
    print "top_n: ", top_n
    explain_occlude_area_format_out(
        sampleid,
        visfolder,
        np.copy(test_feature),
        np.copy(test_label),
        os.path.join(
            os.path.join(os.environ.get('GEMINI_HOME'),
                         'model/readmission_CNN_code/'),
            'readmitted_prob.csv'),
        os.path.join(
            os.path.join(os.environ.get('GEMINI_HOME'),
                         'model/readmission_CNN_code/'),
            'true_label_prob_matrix.csv'),
        os.path.join(
            os.path.join(os.environ.get('GEMINI_HOME'),
                         'model/readmission_CNN_code/'), 'meta_data.csv'),
        top_n=top_n)
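true_label_prob_matrix stores the occlusion map row-major (index height_idx * width_dim + width_idx); a sketch of reading it back as a 2-D map, assuming the same height_dim and width_dim as computed above:

import numpy as np

mat = np.loadtxt('true_label_prob_matrix.csv', delimiter=',')
heat = mat.reshape(height_dim, width_dim)  # rows follow height_idx, columns follow width_idx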
Example #16
        idx = batchnorm(m, idx, params, param_names)
    elif 'Linear' in module_type:
        idx = linear(m, idx, params, param_names)
    return idx


if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
            'dict. \n resnet could have depth of 18, 34, 101, 152; \n'
            'wrn has depth 50; preact has depth 200; addbn has depth 50')
    parser.add_argument("infile", help="torch checkpoint file")
    parser.add_argument("model", choices=['resnet', 'wrn', 'preact', 'addbn'])
    parser.add_argument("depth", type=int, choices=[18, 34, 50, 101, 152, 200])
    args = parser.parse_args()

    net = model.create_net(args.model, args.depth)
    # model.init_params(net)
    m = torchfile.load(args.infile)
    params = {}
    # params = net.param_values()
    param_names = net.param_names()
    traverse(m, 0, params, param_names)
    miss = [name for name in param_names if name not in params]
    if len(miss) > 0:
        print('The following params are missing from torch file')
        print(miss)

    outfile = os.path.splitext(args.infile)[0] + '.pickle'
    with open(outfile, 'wb') as fd:
        pickle.dump(params, fd)
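The .pickle written by these converters is what the serve scripts above consume via --parameter_file; reading it back is plain pickle, matching the dump above (the file name is illustrative):

import pickle

with open('wrn-50-2.pickle', 'rb') as fd:
    params = pickle.load(fd)
print(len(params), 'entries, e.g.', sorted(params)[:3])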
Example #17
from dataset import Dataset
from model import create_net, compile_model

# Obtain and prepare the data
dataset = Dataset(shuffle=True, normalize=True, subtract_mean=True)

# Define the neural network architecture
model = create_net(dataset.example_input_shape(), dataset.num_classes())

# Choose the training algorithm and prepare the model for training
compile_model(model)

# Train the model
train_x, train_y = dataset.get_training_data()
model.fit(train_x, train_y, epochs=5, batch_size=32, verbose=1)

# Evaluate the model; evaluate() returns [loss, accuracy], so this prints the test error rate
test_x, test_y = dataset.get_testing_data()
print(1 - model.evaluate(test_x, test_y, verbose=0)[1])
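A natural follow-up once the error rate looks reasonable (a sketch; model behaves like a Keras model, judging by fit/evaluate) is to persist the trained network:

model.save('model.h5')  # hypothetical file name; saves architecture plus weights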