Example #1
def extract_feature(model,
                    dataloader,
                    save_path,
                    load_from_disk=True,
                    model_path=''):
    if load_from_disk:
        model = models.Network(base_net=args.model_name,
                               n_class=args.num_class)
        model.load_state_dict(torch.load(model_path))
        model = model.to(DEVICE)
    model.eval()
    correct = 0
    fea_all = torch.zeros(1, 1 + model.base_network.output_num()).to(DEVICE)
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            feas = model.get_features(inputs)
            labels = labels.view(labels.size(0), 1).float()
            x = torch.cat((feas, labels), dim=1)
            fea_all = torch.cat((fea_all, x), dim=0)
            outputs = model(inputs)
            preds = torch.max(outputs, 1)[1]
            correct += torch.sum(preds == labels.squeeze(1).long())  # squeeze so the comparison stays element-wise
        test_acc = correct.double() / len(dataloader.dataset)
    fea_numpy = fea_all.cpu().numpy()
    np.savetxt(save_path, fea_numpy[1:], fmt='%.6f', delimiter=',')
    print('Test acc: %f' % test_acc)
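A minimal usage sketch (not part of the original example): assuming the surrounding script defines models, DEVICE, args, and a target dataloader as in Example #7, a call might look like the lines below; the dictionary key and file paths are illustrative only.

# Hypothetical call to extract_feature; dataloaders['test'] and the paths
# are assumptions, not values taken from the original project.
extract_feature(model=None,  # ignored because load_from_disk=True rebuilds the model
                dataloader=dataloaders['test'],
                save_path='features/target_features.csv',
                load_from_disk=True,
                model_path='save_model/best_model.pth')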
Example #2
def add_network(values):
    session = get_session()
    with session.begin():
        network_ref = models.Network()
        network_ref.update(values)
        network_ref.save(session=session)
    return network_ref.id
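A hedged usage sketch: the columns of models.Network are not shown above, so the keys below are placeholders for whatever fields the model actually defines.

# Hypothetical caller for add_network; the dict keys are placeholders.
new_id = add_network({'label': 'private', 'cidr': '10.0.0.0/24'})
print('created network with id', new_id)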
Example #3
 def build_model(self):
     """Creates and initializes the shared and controller models."""
     if self.args.network_type == 'Net':
         self.shared = models.Network(self.args)
     else:
         raise NotImplementedError(f'Network type '
                                   f'`{self.args.network_type}` is not '
                                   f'defined')
     self.controller = models.Controller(self.args)
Example #4
def network():
    output = None
    if request.method == 'POST':
        json_data = request.get_json(force=True)
        net = models.Network(json_data['name'], json_data['prefix'])
        db.session.add(net)
        db.session.commit()
        # serialize the newly created record so a POST also has a response body
        output = models.NetworkSchema().dump(net).data
    if request.method == 'GET':
        result = models.Network.query.first()
        network_schema = models.NetworkSchema()
        output = network_schema.dump(result).data
    return jsonify({'net': output})
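One way to exercise this view is Flask's test client; the sketch below assumes the handler is registered under a /network route and that app is the surrounding Flask application, neither of which is shown above.

# Hypothetical round trip against the handler; the route path and payload
# fields are assumptions based on the code above.
with app.test_client() as client:
    created = client.post('/network', json={'name': 'lab', 'prefix': '192.168.0.0/24'})
    print(created.get_json())
    fetched = client.get('/network')
    print(fetched.get_json())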
Example #5
def extract_feature(model,
                    dataloader,
                    save_path,
                    load_from_disk=False,
                    model_path=''):
    if load_from_disk:
        model = models.Network(base_net='alexnet', n_class=31)
        # load_state_dict does not return the model, so move it to the device separately
        model.load_state_dict(torch.load(model_path))
        model = model.to(DEVICE)
    model.eval()
    fea_all = []
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            feas = model.get_features(inputs)
            labels = labels.view(labels.size(0), 1).float()
            # accumulate every batch instead of keeping only the last one
            fea_all.append(torch.cat((feas, labels), dim=1))
    fea_numpy = torch.cat(fea_all, dim=0).cpu().numpy()
    np.savetxt(save_path, fea_numpy, fmt='%.6f', delimiter=',')
Example #6
File: main.py  Project: tackelua/Qt
 def GetOrCreateNetwork(self, download_bandwidth_kbps,
                        upload_bandwidth_kbps, round_trip_time_ms,
                        packet_loss_rate, protocol_str, load_type):
     network_type = NetworkPrettyString(download_bandwidth_kbps,
                                        upload_bandwidth_kbps,
                                        round_trip_time_ms,
                                        packet_loss_rate, protocol_str,
                                        load_type)
     query = models.Network.all()
     query.filter("network_type = ", network_type)
     networks = query.fetch(1)
     if networks:
         return networks[0]
     network = models.Network(
         network_type=network_type,
         download_bandwidth_kbps=download_bandwidth_kbps,
         upload_bandwidth_kbps=upload_bandwidth_kbps,
         round_trip_time_ms=round_trip_time_ms,
         packet_loss_rate=packet_loss_rate,
         protocol=protocol_str,
         load_type=load_type)
     network.put()
     return network
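A hedged call sketch: the method lives on some request handler (the enclosing class is not shown), so the receiver and the argument values below are illustrative only.

# Hypothetical invocation; 'handler' stands in for whatever object defines
# GetOrCreateNetwork, and the numbers are made-up network parameters.
network = handler.GetOrCreateNetwork(
    download_bandwidth_kbps=1500,
    upload_bandwidth_kbps=750,
    round_trip_time_ms=40,
    packet_loss_rate=0.0,
    protocol_str='http',
    load_type='cold')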
Example #7
    data_test = data_load.load_data(data_folder + domain['tar'] + 'images/',
                                    BATCH_SIZE['tar'], 'test')
    data_train = data_load.load_data(data_folder + domain['src'] + 'images/',
                                     BATCH_SIZE['src'],
                                     'train',
                                     train_val_split=True,
                                     train_ratio=.8)
    dataloaders['train'], dataloaders['val'] = data_train[0], data_train[1]
    dataloaders['test'] = data_test
    print('Data loaded: Source: {}, Target: {}'.format(args.source,
                                                       args.target))

    # Finetune
    if args.finetune == 1:
        print('Begin finetuning...')
        net = models.Network(base_net=args.model_name,
                             n_class=args.num_class).to(DEVICE)
        criterion = nn.CrossEntropyLoss()
        optimizer = get_optimizer(net)
        if not os.path.exists('save_model/'):
            os.makedirs('save_model/')
        save_path = 'save_model/best_{}_{}.pth'.format(args.model_name,
                                                       args.source)
        model_best, best_acc, acc_hist = finetune(net,
                                                  dataloaders,
                                                  optimizer,
                                                  criterion,
                                                  save_path,
                                                  use_lr_schedule=False)
        print('Finetune completed!')

    # Extract features from finetuned model
Example #8
''' Read in the data; the folder name is used as the id '''
for id in range(2):
    id_string = str(id)
    for filename in glob(prev_dir + id_string + '\\*.jpg'):
        print(filename)
        ''' Extract the image index and set data_label[index] = value '''
        position = filename.replace(prev_dir + id_string + '\\', '')
        position = position.replace(after_dir, '')
        print(position)
        data_label[int(position)] = id

# use the GPU if available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('use device ', device)
''' Build the network '''
net = models.Network()
net = net.float()
net.to(device=device)

all_data = models.Reader(prev_dir, data_label)

batch_size = 4

test_loader = DataLoader(all_data, batch_size=batch_size, shuffle=True)

print('Load data finish, ready to train')

PATH = './model.pth'
net.load_state_dict(torch.load(PATH))

correct = 0
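The snippet stops right after initializing correct; a typical evaluation loop that could follow is sketched below. This is a hedged continuation, not part of the original example, and it assumes test_loader yields (images, labels) batches.

# Hedged continuation (not in the original snippet): plain accuracy loop.
net.eval()
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = net(images.float())
        preds = torch.max(outputs, 1)[1]
        correct += (preds == labels).sum().item()
print('accuracy:', correct / len(all_data))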
Example #9
    return len(intersection) / len(union)


device = 'cuda' if torch.cuda.is_available() else 'cpu'
# specify directory of a learned model
loaddir = sys.argv[1]

# load trained parameters to a network
with open(loaddir + 'args.pkl', 'rb') as f:
    variables = pickle.load(f)
args = variables[0]

params = torch.load(loaddir + 'cnn_microscopy.pth', map_location=device)

model = models.Network().to(device)
model.load_state_dict(params)
model.eval()

# image size of a test image
image_size = torch.Tensor([64, 64, 4]).to(device)
# minimum coordinates of voxels of test image
defaults = utils.gen_defaults(1/image_size, device, centering=False)
lossclass = loss.LocationAndConfidenceLoss(defaults, 1/image_size, default_centering=False, negapos_ratio=3).to(device)


# add random or fixed lateral drift
# translation = numpy.random.randint(-2, 2, size=(4, 2))
# translation[2] = [0, 0]
resolution_high = args.resolution_xy_low / args.scale_xy
z_list = numpy.linspace(0, args.resolution_depth_low * (args.low_depth-1), args.low_depth)
Example #10
log_name = './log/full_model'
cpt_name = '/full_model_'

writer = SummaryWriter(log_name)

print("torch.cuda.is_available: ", torch.cuda.is_available())
print("torch.cuda.device_count: ", torch.cuda.device_count())

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
multiGPUs = [0, 1, 2, 3]

netT = models.ResNet()
sketExt = models.PWCExtractor()
imagExt = models.PWCExtractor()
flowEst = models.Network()
blenEst = models.blendNet()
flowRef = models.UNet(14, 8)
ImagRef = model_deform.DeformUNet(21, 15)

W = 576
H = 384
flowBackWarp = models.backWarp(W, H)
occlusiCheck = models.occlusionCheck(W, H)

if torch.cuda.device_count() >= 1:
    netT = nn.DataParallel(netT, device_ids=multiGPUs)
    sketExt = nn.DataParallel(sketExt, device_ids=multiGPUs)
    imagExt = nn.DataParallel(imagExt, device_ids=multiGPUs)
    flowEst = nn.DataParallel(flowEst, device_ids=multiGPUs)
    blenEst = nn.DataParallel(blenEst, device_ids=multiGPUs)
Example #11
def main():
    parser = argparse.ArgumentParser(description='DeepLoco')
    parser.add_argument('--batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for training (default: 50)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=50,
                        metavar='N',
                        help='input batch size for testing (default: 50)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-3,
                        metavar='M',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disable CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--low-patchsize',
        type=int,
        default=64,
        metavar='N',
        help='patch size of data in low resolution (default: 64)')
    parser.add_argument('--low-depth',
                        type=int,
                        default=4,
                        metavar='N',
                        help='depth of data in low resolution (default: 4)')
    parser.add_argument(
        '--resolution-xy-low',
        type=int,
        default=192,
        metavar='N',
        help=
        'resolution(nm/pixel) of xy plane in low resolution image (default: 192)'
    )
    parser.add_argument(
        '--resolution-depth-low',
        type=int,
        default=400,
        metavar='N',
        help=
        'resolution(nm/plane) of depth in low resolution image (default: 400)')
    parser.add_argument(
        '--scale-xy',
        type=int,
        default=8,
        metavar='N',
        help='scaling factor of horizontal direction (default: 8)')
    parser.add_argument('--scale-depth',
                        type=int,
                        default=8,
                        metavar='N',
                        help='scaling factor of depth (default: 8)')
    parser.add_argument('--min-weight',
                        type=float,
                        default=1.0,
                        metavar='F',
                        help='minimum weight of a molecule (default: 1.0)')
    parser.add_argument('--max-weight',
                        type=float,
                        default=1.0,
                        metavar='F',
                        help='maximum weight of a molecule (default: 1.0)')
    parser.add_argument(
        '--num-particle-train',
        type=int,
        default=5,
        metavar='N',
        help=
        'number of fluorescent particles in one frame in training data (default: 5)'
    )
    parser.add_argument(
        '--num-particle-test',
        type=int,
        default=3,
        metavar='N',
        help=
        'number of fluorescent particles in one frame in test data (default: 3)'
    )
    parser.add_argument('--num-data-train',
                        type=int,
                        default=10000,
                        metavar='N',
                        help='number of training data (default: 10000)')
    parser.add_argument('--num-data-test',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='number of test data (default: 1000)')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument(
        '--savedir',
        default='./data/learned_models/',
        help=
        'save directory is created inside this directory (default="data/learned_models")'
    )

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    numpy.random.seed(args.seed)

    device = torch.device('cuda' if use_cuda else 'cpu')

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # random lateral drifts within plus/minus 48 nm
    resolution_high = args.resolution_xy_low / args.scale_xy
    z_list = numpy.linspace(0,
                            args.resolution_depth_low * (args.low_depth - 1),
                            args.low_depth)
    aug = augmentation.RandomTranslation(2, 2, z_list, resolution_high)

    # train set
    trainset = datasets.RandomDataset(
        low_patchsize=args.low_patchsize,
        low_depth=args.low_depth,
        resolution_xy_low=args.resolution_xy_low,
        resolution_depth_low=args.resolution_depth_low,
        scale_xy=args.scale_xy,
        scale_depth=args.scale_depth,
        min_weight=args.min_weight,
        max_weight=args.max_weight,
        num_particles=args.num_particle_train,
        data_size=args.num_data_train,
        augmentation=aug)
    train_loader = torch.utils.data.DataLoader(trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               collate_fn=datasets.collate_fn,
                                               **kwargs)

    # test set (parameters are same as train set)
    testset = datasets.RandomDataset(
        low_patchsize=args.low_patchsize,
        low_depth=args.low_depth,
        resolution_xy_low=args.resolution_xy_low,
        resolution_depth_low=args.resolution_depth_low,
        scale_xy=args.scale_xy,
        scale_depth=args.scale_depth,
        min_weight=args.min_weight,
        max_weight=args.max_weight,
        num_particles=args.num_particle_test,
        data_size=args.num_data_test)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              collate_fn=datasets.collate_fn,
                                              **kwargs)

    # minimum and maximum coordinate of x, y, z of the target space
    xlim = (0, args.resolution_xy_low * args.low_patchsize)
    ylim = (0, args.resolution_xy_low * args.low_patchsize)
    zlim = (0, args.resolution_depth_low * args.low_depth)
    # minimum coordinate
    coord_min = torch.Tensor((xlim[0], ylim[0], zlim[0]))
    # maximum coordinate
    coord_max = torch.Tensor((xlim[1], ylim[1], zlim[1]))
    # regression model
    model = models.Network().to(device)

    # size of the image
    image_size = torch.Tensor(
        [args.low_patchsize, args.low_patchsize, args.low_depth]).to(device)
    # minimum coordinates of each voxel
    defaults = utils.gen_defaults(1 / image_size, device, centering=False)
    # loss function (binary cross entropy + l1 distance)
    lossfunc = loss.LocationAndConfidenceLoss(defaults,
                                              1 / image_size,
                                              default_centering=False,
                                              negapos_ratio=3).to(device)

    trainloss_history = []
    testloss_history = []

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # iterate train and test
    for epoch in range(1, args.epochs + 1):
        trainloss = train(args, model, device, train_loader, optimizer,
                          lossfunc, epoch)
        testloss = test(args, model, device, test_loader, lossfunc)

        trainloss_history.append(trainloss)
        testloss_history.append(testloss)

        # create the save directory on the first epoch (the model itself is saved after training)
        if epoch == 1 and args.save_model:
            dirname = make_savedir(args.savedir)

    if args.save_model:
        model = model.cpu()
        torch.save(model.state_dict(), dirname + '/cnn_microscopy.pth')
        with open(dirname + '/args.pkl', mode='wb') as f:
            pickle.dump([
                args, train_loader, test_loader, trainloss_history,
                testloss_history
            ], f)
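Presumably the script ends with the usual entry-point guard, which the excerpt cuts off before; a minimal sketch:

# Assumed module entry point; not shown in the original excerpt.
if __name__ == '__main__':
    main()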
Example #12
def run_experiment(args, seed):
    if args.train is not None:
        train_dl = MTCDataLoader(args.train)
        if args.dev is None:
            train_data, dev_data = train_dl.train_test_split(
                test_size=args.devsize)
        else:
            dev_dl = MTCDataLoader(args.dev)
            train_data = list(train_dl.sequences())
            dev_data = list(dev_dl.sequences())
        if args.test is not None:
            test_dl = MTCDataLoader(args.test)
            test_data = list(test_dl.sequences())
        else:
            test_data = []
        print(
            f'Train: {len(train_data)}, Dev: {len(dev_data)}, Test: {len(test_data)}'
        )

    elif args.dataconf:
        if args.dataconf not in CONFIGS:
            print(f"Error. {args.dataconf} is not a valid data configuration.",
                  file=sys.stderr)
            print(f"Choose one of: {' '.join(DataConf.confs.keys())}",
                  file=sys.stderr)
            raise SystemExit
        train_data, dev_data, test_data = DataConf().getData(
            args.dataconf, args.devsize, args.testsize, args.cross_class_size)
        if args.test:
            print("Warning. Command line argument --test_data ignored.",
                  file=sys.stderr)

    if args.savetrain:
        MTCDataLoader.writeJSON(args.savetrain, train_data)
    if args.savedev:
        MTCDataLoader.writeJSON(args.savedev, dev_data)
    if args.savetest:
        MTCDataLoader.writeJSON(args.savetest, test_data)

    cat_encoders = [CategoricalEncoder(f) for f in args.categorical_features]
    scaler = (StandardScaler if args.scaler == 'zscore' else
              MinMaxScaler if args.scaler == 'minmax' else IdentityScaler)
    cont_encoders = [
        ContinuousEncoder(f, scaler=scaler) for f in args.continuous_features
    ]
    encoders = cat_encoders + cont_encoders

    train_selector, dev_selector = None, None
    if args.precompute_examples:
        if args.example_type == 'pairs':
            train_selector = samplers.PairSelector(
                pos_neg_ratio=args.pn_ratio, random_sample=args.sample_ratio)
            dev_selector = samplers.PairSelector(pos_neg_ratio=args.pn_ratio)
        else:
            train_selector = samplers.TripletSelector(
                sample_ratio=args.sample_ratio)
            dev_selector = samplers.TripletSelector(
                sample_ratio=args.sample_ratio)

    dataset_constructor = (datasets.Dataset if args.online_sampler else
                           datasets.DupletDataset if args.example_type
                           == 'pairs' else datasets.TripletDataset)

    train = dataset_constructor(train_data,
                                *encoders,
                                batch_size=args.batch_size,
                                selector=train_selector,
                                label='tunefamily',
                                train=True).fit()

    dev = dataset_constructor(dev_data,
                              *encoders,
                              batch_size=args.batch_size,
                              selector=dev_selector,
                              label='tunefamily',
                              train=False).fit()

    if args.precompute_examples:
        print(train_selector)
        print(dev_selector)

    collate_fn = datasets.collate_fn

    if args.balanced_batch_sampler:
        train_batch_sampler = datasets.BalancedBatchSampler(
            train.labels, n_classes=args.n_classes, n_samples=args.n_samples)
        dev_batch_sampler = datasets.BalancedBatchSampler(
            dev.labels, n_classes=args.n_classes, n_samples=args.n_samples)
        train_loader = DataLoader(train,
                                  batch_sampler=train_batch_sampler,
                                  collate_fn=collate_fn,
                                  num_workers=args.n_workers)
        dev_loader = DataLoader(dev,
                                batch_sampler=dev_batch_sampler,
                                collate_fn=collate_fn,
                                num_workers=args.n_workers)
    elif not args.precompute_examples:
        train_loader = DataLoader(train,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn,
                                  num_workers=args.n_workers)
        dev_loader = DataLoader(dev,
                                batch_size=args.batch_size,
                                collate_fn=collate_fn,
                                num_workers=args.n_workers)
    else:
        train_loader, dev_loader = datasets.DataLoader(
            train), datasets.DataLoader(dev)

    device = 'cuda' if args.cuda else 'cpu'

    emb_dims = [(encoder.size(), args.emb_dim) for encoder in cat_encoders]
    if args.model.lower() == 'rnn':
        network = models.RNN(emb_dims,
                             args.hid_dim,
                             cont_features=len(cont_encoders),
                             n_layers=args.n_layers,
                             cell=args.cell,
                             dropout=args.dropout,
                             bidirectional=args.bidirectional)
    elif args.model.lower() == 'cnn':
        network = models.CNN(emb_dims,
                             cont_features=len(cont_encoders),
                             kernel_sizes=tuple(args.kernel_sizes),
                             highway_layers=args.highway_layers,
                             out_channels=args.out_channels,
                             dropout=args.dropout)
    else:
        network = models.CNNRNN(emb_dims,
                                cont_features=len(cont_encoders),
                                kernel_sizes=tuple(args.kernel_sizes),
                                highway_layers=args.highway_layers,
                                out_channels=args.out_channels,
                                dropout=args.dropout,
                                cell=args.cell,
                                bidirectional=args.bidirectional,
                                n_layers=args.n_layers)

    if args.example_type == 'pairs':
        if not args.online_sampler:
            if args.loss == 'cosine':
                loss_fn = models.CosinePairLoss(weight=args.weight,
                                                margin=args.margin)
            else:
                loss_fn = models.EuclideanPairLoss(margin=args.margin)
            model = models.TwinNetwork(network,
                                       loss_fn).to(device)  # margin 0.16, 0.4
        else:
            if args.loss == 'cosine':
                loss_fn = models.OnlineCosinePairLoss(
                    samplers.HardNegativePairSelector(),
                    weight=args.weight,
                    margin=args.margin,
                    cutoff=args.cutoff_cosine)
            else:
                loss_fn = models.OnlineEuclideanPairLoss(
                    samplers.HardNegativePairSelector(), margin=args.margin)
            model = models.Network(network, loss_fn).to(device)
    else:
        if not args.online_sampler:
            if args.loss == 'cosine':
                loss_fn = models.CosineTripletLoss(margin=args.margin)
            else:
                loss_fn = models.EuclidianTripletLoss(margin=args.margin)
            model = models.TripletNetwork(network, loss_fn).to(device)
        else:
            if args.loss == 'cosine':
                loss_fn = models.OnlineCosineTripletLoss(
                    samplers.NegativeTripletSelector(
                        method=args.negative_pair_selector,
                        margin=args.margin),
                    margin=args.margin)
            else:
                loss_fn = models.OnlineEuclideanTripletLoss(
                    samplers.NegativeTripletSelector(
                        method=args.negative_pair_selector,
                        margin=args.margin),
                    margin=args.margin)
            model = models.Network(network, loss_fn).to(device)

    print(model)

    for embedding in model.network.embs:
        embedding.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        'min',
        verbose=True,
        patience=args.lr_scheduler,
        threshold=1e-4,
        cooldown=5)
    print(
        f'Number of parameters: {sum(p.nelement() for p in model.parameters())}'
    )

    try:
        early_stop = fit_model(train_loader,
                               dev_loader,
                               model,
                               optimizer,
                               scheduler,
                               args.epochs,
                               args.log_interval,
                               plot=False,
                               patience=args.patience,
                               early_stop_score=args.early_stop_score,
                               eval_metric=args.loss)
        best_score = early_stop['best']
        fails = early_stop['fails']
        best_params = early_stop['best_params']
        val_scores = early_stop['val_scores']
    except EarlyStopException as e:
        print("Early stopping training")
        best_score = e.best
        fails = e.fails
        best_params = e.best_params
        val_scores = e.val_scores

    model.load_state_dict(best_params)
    model.eval()
    # serialize model if necessary

    print("Best", args.early_stop_score, best_score)

    if args.save_encodings is not None and args.dev is not None:
        utils.save_encodings(dev, model, args.save_encodings, 'dev')

    if args.test is not None:
        train_label_set = list(set(train_loader.dataset.labels))
        test = dataset_constructor(test_data,
                                   *encoders,
                                   batch_size=args.batch_size,
                                   label='tunefamily',
                                   train=False).fit()
        test_scores = metrics.evaluate_ranking(model,
                                               test,
                                               train_label_set=train_label_set,
                                               metric=args.loss)
        message = 'Testing:\n'
        message += f'  silhouette: {test_scores["silhouette"]:.3f}\n'
        message += f'  MAP: {test_scores["MAP"]:.3f}\n'
        message += f'  MAP (seen): {test_scores["MAP seen labels"]:.3f}\n'
        message += f'  MAP (unseen): {test_scores["MAP unseen labels"]:.3f}\n'
        message += f'  Margin: {test_scores["margin_score"]:.3f}'
        print(message)

    with open(f'{args.results_dir}/{args.results_path}', 'a+') as f:
        f.write(
            json.dumps(
                {
                    "params": vars(args),
                    "dev_score": float(best_score),
                    "val_scores": val_scores,
                    "test_scores":
                    test_scores if args.test is not None else {},
                    "fails": fails,
                    "seed": seed,
                    "now": str(datetime.now())
                },
                cls=NumpyEncoder) + '\n')

    if args.save_encodings is not None and args.test is not None:
        utils.save_encodings(test, model, args.save_encodings, 'test')

    return model
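A hedged driver sketch for run_experiment: the surrounding script presumably parses args and loops over seeds, but that code is not shown, so everything below is an assumption.

# Hypothetical driver; 'args' would come from the script's own argparse setup.
for seed in (1, 2, 3):
    torch.manual_seed(seed)
    model = run_experiment(args, seed)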