def extract_feature_to_file(path, name, model, transform):
    data_loader = ImageDataLoader(path,
                                  name=name,
                                  transforms=transform,
                                  shuffle=False)

    features = extract_feature(model, data_loader)
    np.save(f'{name}_features.npy', features.numpy())
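# Hypothetical usage sketch (assumes a trained feature-extraction model and preprocessing
# transforms such as the query/gallery ones shown in a later example):
# extract_feature_to_file('.', name='query', model=model, transform=query_preprocess_image)
# extract_feature_to_file('.', name='gallery', model=model, transform=val_preprocess_image)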
Example #2
def get_train_loader(args, img_size):
    transform = transforms.Compose([
        # transforms.Resize(img_size),
        transforms.RandomCrop(img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    dataset_args = [
        args.dataset,
        transform
    ]

    return ImageDataLoader(*dataset_args)
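# Hypothetical call (assumes an argparse namespace exposing a `dataset` attribute). Note that
# Normalize with mean=std=0.5 maps ToTensor's [0, 1] pixels into the [-1, 1] range:
# train_loader = get_train_loader(args, img_size=256)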
Example #3
def main():
    sess = tf.Session()
    image_util = ImageUtil()
    dataloader = ImageDataLoader(IMAGES_DIR, image_util)

    train_dataset = dataloader.get_dataset('train', NUM_TRAIN_IMAGES)
    val_dataset = dataloader.get_dataset('val', NUM_VAL_IMAGES)
    test_dataset = dataloader.get_dataset('test', NUM_TEST_IMAGES)

    model = ImageColorization().model
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss={'colorization_output': 'mean_squared_error',
                        'classification_output': 'categorical_crossentropy'},
                  loss_weights={'colorization_output': 1., 'classification_output': 1./300})

    print('Starting training')
    for i in range(NUM_EPOCHS):
        model.fit(train_dataset['greyscale'], {'colorization_output': train_dataset['color'], 'classification_output': train_dataset['class']}, epochs=1)
        ab_unscaled = model.predict(val_dataset['greyscale'])[0]
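        # rebuild a Lab image: L (greyscale) rescaled to [0, 100], predicted ab mapped from [0, 1] to [-100, 100]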
        lab = np.concatenate([val_dataset['greyscale']*100, ab_unscaled*200-100], axis=3)
        show_predicted_values(ab_unscaled, lab)
        
    print('Finished training')

    results = model.predict(test_dataset['greyscale'])
Example #4
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
mse = 0.0
mape = 0.0
total_count = 0.0
#load test data
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
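    # crowd counts are the integrals (sums) of the density maps;
    # accumulate absolute, relative and squared errors for MAE / MAPE / MSE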
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    mape += abs(gt_count - et_count) / gt_count
    mse += ((gt_count - et_count) * (gt_count - et_count))
    total_count += gt_count
    if vis:
Example #5
    if exp_name is None:
        exp_name = save_exp_name
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)

# training
train_loss = 0
step_cnt = 0
re_cnt = False
t = Timer()
t.tic()

data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=True,
                              pre_load=True)
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=True,
                                  pre_load=True)
best_mae = sys.maxsize

for epoch in range(start_step, end_step + 1):
    step = -1
    train_loss = 0
    for blob in data_loader:
        step = step + 1
        im_data = blob['data']
Example #6
# TensorBoard config
use_tensorboard = True
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in TensorBoard
exp_name = None  # the previous experiment name in TensorBoard



rand_seed = 64678    
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)
    
# load training and validation data
data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=False, pre_load=True)
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=False, pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()

params = list(net.parameters())
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)  # optimization algorithm

if not os.path.exists(output_dir):
    os.mkdir(output_dir)
Example #7
def train_model():
    method = 'AMCNN'  # method name - used for saving model file
    dataset_name = 'SHA'  # dataset name - used for saving model file
    output_dir = './saved_models_SHA/'  # model files are saved here

    # train and validation paths
    train_path = './data/formatted_trainval_A/shanghaitech_part_A_patches_9/train'
    train_gt_path = './data/formatted_trainval_A/shanghaitech_part_A_patches_9/train_den'
    val_path = './data/formatted_trainval_A/shanghaitech_part_A_patches_9/val'
    val_gt_path = './data/formatted_trainval_A/shanghaitech_part_A_patches_9/val_den'

    # training configuration
    start_step = 0
    end_step = 2000
    lr = 0.00001
    momentum = 0.9
    disp_interval = 500
    log_interval = 250

    rand_seed = 64678
    if rand_seed is not None:
        np.random.seed(rand_seed)
        torch.manual_seed(rand_seed)
        torch.cuda.manual_seed(rand_seed)

    # load training and validation data
    data_loader = ImageDataLoader(train_path,
                                  train_gt_path,
                                  shuffle=True,
                                  gt_downsample=False,
                                  pre_load=True)
    class_wts = data_loader.get_classifier_weights()
    data_loader_val = ImageDataLoader(val_path,
                                      val_gt_path,
                                      shuffle=False,
                                      gt_downsample=False,
                                      pre_load=True)

    # load net and initialize it
    net = CrowdCounter(ce_weights=class_wts)
    weights_normal_init(net, dev=0.01)
    net.cuda()
    net.train()

    params = list(net.parameters())
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        net.parameters()),
                                 lr=lr)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # training
    train_loss = 0
    best_mae = sys.maxsize
    best_mse = sys.maxsize
    for epoch in range(start_step, end_step + 1):
        step = -1
        train_loss = 0
        for blob in data_loader:
            step = step + 1
            im_data = blob['data']
            gt_data = blob['gt_density']
            gt_class_label = blob['gt_class_label']

            # data augmentation on the fly
            if np.random.uniform() > 0.5:
                # randomly flip input image and density
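                # (axis 3 is assumed to be the width dimension of an N x C x H x W batch)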
                im_data = np.flip(im_data, 3).copy()
                gt_data = np.flip(gt_data, 3).copy()
            if np.random.uniform() > 0.5:
                # add random noise to the input image
                im_data = im_data + np.random.uniform(
                    -10, 10, size=im_data.shape)

            density_map = net(im_data, gt_data, gt_class_label, class_wts)
            loss = net.loss
            train_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % disp_interval == 0:
                gt_count = np.sum(gt_data)
                density_map = density_map.data.cpu().numpy()
                et_count = np.sum(density_map)
                save_results(im_data, gt_data, density_map, output_dir)
                log_text = 'epoch: %4d, step %4d, gt_cnt: %4.1f, et_cnt: %4.1f, loss: %4.7f' % (
                    epoch, step, gt_count, et_count, train_loss)
                log_print(log_text, color='green', attrs=['bold'])

        if (epoch % 2 == 0):
            save_name = os.path.join(
                output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
            save_net(save_name, net)
            # calculate error on the validation dataset
            mae, mse = evaluate_model(save_name, data_loader_val)
            if mae <= best_mae and mse <= best_mse:
                best_mae = mae
                best_mse = mse
                best_model = '{}_{}_{}.h5'.format(method, dataset_name, epoch)
            log_text = 'EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (epoch, mae, mse)
            log_print(log_text, color='green', attrs=['bold'])
            log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (
                best_mae, best_mse, best_model)
            log_print(log_text, color='green', attrs=['bold'])
Example #8
def test_preprocess_image():
    data = ImageDataLoader('.', transforms=[preprocess_image], p=4, k=10)
    data_iter = data.flow()
    batch = next(data_iter)
    img = batch[0][0]
    array_to_img(img).save('test.jpg')
Example #9
def train():
    if tf.__version__.split('.')[0] != '2':
        tf.enable_eager_execution()

    transform = [preprocess_image]

    p = 4
    k = 16
    batch_size = p * k
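    # P*K sampling: each batch presumably holds p identities with k images each,
    # as needed for batch-hard triplet mining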
    learning_rate = 0.05
    epochs = 120
    img_shape = (256, 256)
    margin = 0.3

    data = ImageDataLoader('.', transforms=transform, p=p, k=k)

    class_num = len(data.classes)
    steps_per_epoch = (class_num // p) * 40

    strategy = tf.distribute.MirroredStrategy()
    print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
    global_batch_size = batch_size * strategy.num_replicas_in_sync
    num_replicas_in_sync = strategy.num_replicas_in_sync

    batch_hard_func = BatchHard(
        margin=margin, reduction=losses_utils.ReductionV2.NONE)
    id_loss_func = SparseCategoricalCrossentropy(
        reduction=losses_utils.ReductionV2.NONE)

    id_loss_metrics = tf.keras.metrics.Mean()
    id_corrects = tf.keras.metrics.SparseCategoricalAccuracy()

    running_corrects = batch_hard_func.running_corrects
    running_margin = batch_hard_func.running_margin
    triple_loss_metrics = tf.keras.metrics.Mean()

    def loss_func(id_output, features, labels):
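        # with Reduction.NONE the per-example losses are summed and divided by the global
        # batch size, so summing the per-replica losses across devices yields the true mean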
        triple_loss = tf.reduce_sum(batch_hard_func(labels, features)) / global_batch_size
        id_loss = tf.reduce_sum(id_loss_func(
            labels, id_output)) / global_batch_size
        id_loss_metrics.update_state(id_loss)
        triple_loss_metrics.update_state(triple_loss)
        return id_loss + triple_loss

    with strategy.scope():
        model = build_baseline_model(class_num, img_shape)

        finetune_weights = model.get_layer(name='resnet50').trainable_weights
        finetune_optimizer = SGD(
            learning_rate=learning_rate * 0.1, momentum=0.9, nesterov=True)

        train_weights = [
            w for w in model.trainable_weights if w not in finetune_weights]
        optimizer = SGD(learning_rate=learning_rate,
                        momentum=0.9, nesterov=True)

        all_weights = finetune_weights + train_weights

        # sgd = SGD(learning_rate=1)

        learning_rate_scheduler = LearningRateScheduler(
            [optimizer, finetune_optimizer])

        data_iter = data.flow()

        with open('checkpoint/model.json', 'w', encoding='utf-8') as fp:
            fp.write(model.to_json())

        def train_step(batch):
            imgs, labels = batch

            with tf.GradientTape(persistent=True) as tape:
                id_output, features = model(imgs)

                loss = loss_func(id_output, features, labels)
                # l2_loss = weight_decay * \
                #     tf.add_n([tf.nn.l2_loss(v)
                #               for v in model.trainable_weights])

            grads = tape.gradient(loss, all_weights)
            # l2_grads = tape.gradient(l2_loss, model.trainable_weights)

            finetune_grads = grads[:len(finetune_weights)]
            train_grads = grads[len(finetune_weights):]

            finetune_optimizer.apply_gradients(
                zip(finetune_grads, finetune_weights))
            optimizer.apply_gradients(zip(train_grads, train_weights))
            # sgd.apply_gradients(zip(l2_grads, model.trainable_weights))

            id_corrects.update_state(labels, id_output)

            return loss

        @tf.function
        def distributed_train_step(batch):
            per_replica_losses = strategy.experimental_run_v2(
                train_step, args=(batch,))
            loss = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
                                   axis=None)
            return loss

        # model.load_weights('checkpoint/30.h5')
        # learning_rate_scheduler([optimizer, finetune_optimizer], 20)

        with K.learning_phase_scope(1):
            history = defaultdict(list)

            for cur_epoch in range(1, epochs + 1):
                print('Epoch {}/{}'.format(cur_epoch, epochs))
                progbar = Progbar(steps_per_epoch)

                learning_rate_scheduler(cur_epoch)

                for i in range(steps_per_epoch):
                    batch = next(data_iter)
                    if len(batch[1]) != batch_size:
                        batch = next(data_iter)
                        assert len(batch[1]) == batch_size

                    loss = distributed_train_step(batch)

                    cur_data = [('loss', loss), ('id_acc', id_corrects.result())]

                    progbar.add(1, values=cur_data)

                print(
                    f'acc: {running_corrects.result()} margin: {running_margin.result()}')
                print(
                    f'id acc: {id_corrects.result()} id loss: {id_loss_metrics.result()}')
                print(
                    f'triple_loss: {triple_loss_metrics.result()}')
                running_corrects.reset_states()
                running_margin.reset_states()
                triple_loss_metrics.reset_states()
                id_corrects.reset_states()
                id_loss_metrics.reset_states()

                for key, val in cur_data:
                    history[key].append(float(val))

                with open('checkpoint/history.json', 'w') as fp:
                    json.dump(history, fp)

                if cur_epoch % 5 == 0:
                    model.save_weights(f'checkpoint/{cur_epoch}.h5')
Example #10
#defining the model

net = net.cuda()

#loading the trained weights
model_path = 'dataset/Shanghai/cmtl_shtechA_204.h5'

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()

data_loader = ImageDataLoader(
    'dataset/Shanghai/part_A_final/test_data/images/',
    'dataset/Shanghai/part_A_final/test_data/ground_truth',
    shuffle=False,
    gt_downsample=True,
    pre_load=True)
'''
unsqueeze(arg) inserts a new dimension of size 1 at position arg.
Conversely, squeeze(arg) removes dimension arg (only if that dimension has size 1; otherwise nothing is removed).
'''
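# For example (PyTorch): torch.zeros(3, 224, 224).unsqueeze(0) has shape (1, 3, 224, 224),
# and calling .squeeze(0) on that result gives back shape (3, 224, 224).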
# output = net(img.unsqueeze(0))
for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    output = net(im_data, gt_data)

print("Predicted Count : ", int(output.detach().cpu().sum().numpy()))

temp = np.asarray(output.detach().cpu().reshape(
Example #11
# TensorBoard config
use_tensorboard = False
save_exp_name = method + '_' + dataset_name + '_' + 'v1'
remove_all_log = False  # remove all historical experiments in TensorBoard
exp_name = None  # the previous experiment name in TensorBoard

rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)

# load training and validation data
data_loader = ImageDataLoader(train_path,
                              train_gt_path,
                              shuffle=True,
                              gt_downsample=False,
                              pre_load=True)
class_wts = data_loader.get_classifier_weights()
data_loader_val = ImageDataLoader(val_path,
                                  val_gt_path,
                                  shuffle=False,
                                  gt_downsample=False,
                                  pre_load=True)

#load net and initialize it
net = CrowdCounter(ce_weights=class_wts)
network.weights_normal_init(net, dev=0.01)
net.cuda()
net.train()
Example #12
from flask import Flask
from model import build_baseline_model
import numpy as np
from data_loader import ImageDataLoader
from extract_feature import val_preprocess_image, query_preprocess_image

app = Flask(__name__)
app.static_folder = '../../data'
app.template_folder = '.'


def load_data(name):
    return np.load(f'{name}.npy')


query_data_loader = ImageDataLoader('.',
                                    transforms=[query_preprocess_image],
                                    name='query',
                                    shuffle=False)
gallery_data_loader = ImageDataLoader('.',
                                      transforms=[val_preprocess_image],
                                      name='gallery',
                                      shuffle=False)

query_paths = query_data_loader.paths
gallery_paths = gallery_data_loader.paths

query_features = load_data('query_features')
query_labels = load_data('query_labels')

gallery_features = load_data('gallery_features')
gallery_labels = load_data('gallery_labels')