Example #1
0
    default='market1501std',
    choices=datasets.names())
# Command-line options: checkpoint and output paths are mandatory;
# --combine selects which model views to ensemble (resolved below).
parser.add_argument('-c', '--checkpoint', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--combine', type=str, default='123')
args = parser.parse_args()

# Load the evaluation dataset from ./data/<name> (relative to the current
# working directory) and build the deduplicated query + gallery pool.
dataset = args.dataset
cur_path = os.getcwd()
data_dir = os.path.join(cur_path, 'data', dataset)
data = datasets.create(dataset, data_dir)
query_gallery = list(set(data.query).union(data.gallery))

# model config
# One Config per co-training "view"; --combine picks which views are used.
config1 = Config(batch_size=128)
config2 = Config(
    model_name='densenet121', height=224, width=224, batch_size=128)
config3 = Config(model_name='resnet101', batch_size=128)

# Generalized view selection: the original if/elif chain accepted only
# '123', '12' and '23'. Mapping each digit to its config keeps those
# combinations working identically while additionally supporting any
# other non-empty digit string (e.g. '13' or '2'). Invalid input still
# raises the same ValueError.
_view_configs = {'1': config1, '2': config2, '3': config3}
if not args.combine or any(c not in _view_configs for c in args.combine):
    raise ValueError('wrong combination')
configs = [_view_configs[c] for c in args.combine]

def eval(save_dir):
    mAP = []
    Acc = []
Example #2
0
                    type=str,
                    default='market1501std',
                    choices=datasets.names())
# Model architecture and input-size options.
parser.add_argument('-a', '--arch', type=str, default='resnet50')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=128)
parser.add_argument('--batch-size', type=int, default=32)
# --evaluate is a flag (store_true): False unless given on the command line.
parser.add_argument('--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
# Empty-string default — args.checkpoint is therefore never None,
# which matters for the `is not None` check further down.
parser.add_argument('--checkpoint', default='', type=str)
args = parser.parse_args()

# Build the model view from the CLI options; image translation disabled.
config = Config(model_name=args.arch,
                img_translation=None,
                height=args.height,
                width=args.width)
# config1.height = 224
# config1.width = 224
# config1.epochs = 200
# Resolve dataset and log locations relative to the working directory.
dataset = args.dataset
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path, 'data', dataset)

data = datasets.create(dataset, data_dir)
# train_data,untrain_data = dp.split_dataset(data.trainval, 0.2)

# NOTE(review): --checkpoint defaults to '' (see the argparse setup above),
# so argparse never produces None here and this condition is always True.
# `if args.checkpoint:` was probably intended — confirm before changing.
if args.checkpoint is not None:

    model = models.create(config.model_name,
Example #3
0
                                  configs[view])
            # Hard class predictions from the probability matrix p_b
            # (one row per trainval sample).
            p_y = np.argmax(p_b, axis=1)
            # Ground truth: trainval entries are 4-tuples with the label
            # in the second position.
            t_y = [c for (_, c, _, _) in data.trainval]
            # Training-set accuracy; the list-vs-ndarray comparison is
            # broadcast elementwise by numpy's ndarray.__eq__.
            print(np.mean(t_y == p_y))
            # evaluate current model and save it
            # mu.evaluate(model,data,configs[view])
            # Checkpoint the model weights plus the current
            # pseudo-labelled training set for this co-training step.
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'epoch': step + 1,
                    'train_data': new_train_data
                },
                False,
                fpath=os.path.join(configs[view].logs_dir,
                                   configs[view].model_name,
                                   'spaco.epoch%d' % (step + 1)))
            # mkdir_if_missing(logs_pth)
            # torch.save(model.state_dict(), logs_pth +
            #           '/spaco.epoch%d' % (step + 1))


# Two-view self-paced co-training (spaco) on market1501std:
# a default-Config view plus a DenseNet-121 view, run for 4 iterations.
config1 = Config()
config2 = Config(model_name='densenet121', height=224, width=224)
dataset = 'market1501std'
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path, 'data', dataset)
data = datasets.create(dataset, data_dir)

spaco([config1, config2], data, 4)
Example #4
0
              'performance': mAP
          },
          False,
          fpath=os.path.join(
              save_dir, '%s.epoch%d' % (configs[view].model_name, step + 1)))
      # On the final self-paced iteration, collect this view's features
      # over the query/gallery pool for the combined evaluation below.
      if step + 1 == iter_steps:
        features += [
            mu.get_feature(net, query_gallery, data.images_dir, configs[view], device)
        ]
    # Grow the fraction of pseudo-labelled samples added next round.
    add_ratio += 1.2
    #  pred_y = np.argmax(sum(pred_probs), axis=1)
  # Fuse the per-view features and report the combined accuracy.
  acc = mu.combine_evaluate(features, data)
  print(acc)


# Three candidate views, all trained with the weighted-softmax loss;
# config3 additionally sets img_translation=2 (semantics defined by
# Config — presumably an augmentation setting; confirm).
config1 = Config(model_name='resnet50', loss_name='weight_softmax')
config2 = Config(model_name='densenet121',
                 loss_name='weight_softmax',
                 height=224,
                 width=224)
config3 = Config(model_name='resnet101',
                 loss_name='weight_softmax',
                 img_translation=2)

dataset = args.dataset
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path, 'data', dataset)
data = datasets.create(dataset, data_dir)

# NOTE(review): the spaco call below (cut off in this view) passes only
# [config1, config2]; config3 appears unused here — confirm.

spaco([config1, config2],
Example #5
0
                    help="input width, default: 128 for resnet*, "
                         "224 for densenet*")

args = parser.parse_args()

# prepare dataset
dataset = args.dataset
cur_path = os.getcwd()

logs_dir = os.path.join(cur_path, "logs")
data_dir = os.path.join(cur_path,'data',dataset)
data = datasets.create(dataset,data_dir)
train_data = data.trainval

# model config
# 512-d embedding, input size from the CLI; set_training(False) switches
# the config to evaluation mode (exact effect defined by Config — confirm).
config = Config()
config.num_features = 512
config.width = args.width
config.height = args.height
config.set_training(False)
config.model_name = args.arch

# create model
# dropout and num_classes come from Config defaults (not set above).
model = models.create(config.model_name, num_features=config.num_features,
                      dropout=config.dropout, num_classes=config.num_classes)
# Wrap for multi-GPU data parallelism; requires CUDA.
model = torch.nn.DataParallel(model).cuda()
# model = model.cuda()

#epoch analysis
for i in range(0, 5):
    # load model weights
Example #6
0
File: cotrain.py — Project: Flowerfan/SPamCo
                    mu.get_feature(net, query_gallery, data.images_dir,
                                   configs[view], device)
                ]

        # update training data
        # Ensemble vote: sum the per-view probability matrices, then argmax.
        pred_y = np.argmax(sum(pred_probs), axis=1)
        # Aggregate the per-view selections (presumably boolean masks of
        # samples to add — confirm against get_weights/sel callers).
        add_id = sum(add_ids)
        if args.tricks:
            add_ratio += 1.2
            # NOTE(review): the updated untrain pool is discarded here
            # (assigned to _), unlike the branch below — confirm intended.
            new_train_data, _ = dp.update_train_untrain(
                add_id, train_data, untrain_data, pred_y)
        else:
            # Stop once every unlabelled sample has been consumed.
            if len(untrain_data) == 0:
                break
            new_train_data, untrain_data = dp.update_train_untrain(
                add_id, new_train_data, untrain_data, pred_y)
    # Fuse the per-view features and report the combined accuracy.
    acc = mu.combine_evaluate(features, data)
    print(acc)


# Co-training run on market1501std with a DenseNet-121 view and a
# ResNet-101 view (img_translation=2), for 5 rounds.
config1 = Config()
config2 = Config(model_name='densenet121', height=224, width=224)
config3 = Config(model_name='resnet101', img_translation=2)
dataset = 'market1501std'
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path, 'data', dataset)
data = datasets.create(dataset, data_dir)

# NOTE(review): config1 is created but not passed to cotrain — possibly
# leftover from an earlier two-view setup; confirm.
cotrain([config2, config3], data, 5)
Example #7
0
            # Per-sample confidence weights for this view; sel_ids marks
            # the samples with positive weight (presumably the selection
            # mask used elsewhere — confirm).
            weights[view] = get_weights(pred_probs[view], pred_y, train_data, add_ratio, gamma)
            sel_ids[view] = weights[view] > 0

            # calculate predicted probabilities on all trainval data
            p_b = mu.predict_prob(model, data.trainval, data_dir, configs[view])
            p_y = np.argmax(p_b, axis=1)
            # Ground truth: trainval entries are 4-tuples with the label
            # in the second position.
            t_y = [c for (_,c,_,_) in data.trainval]
            # Training-set accuracy; the list-vs-ndarray comparison is
            # broadcast elementwise by numpy.
            print(np.mean(t_y == p_y))
            # evaluate current model and save it
            # mu.evaluate(model,data,configs[view])
            # Checkpoint the model weights plus the current
            # pseudo-labelled training set for this step.
            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': step +1,
                'train_data': new_train_data}, False,
                fpath = os.path.join(configs[view].logs_dir, configs[view].model_name, 'soft_spaco.epoch%d' % (step + 1))
            )
            # mkdir_if_missing(logs_pth)
            # torch.save(model.state_dict(), logs_pth +
            #           '/spaco.epoch%d' % (step + 1))

# Two-view soft self-paced co-training on market1501std: a default-model
# view and a DenseNet-121 view, both using the weighted-softmax loss,
# run for 5 rounds.
config1 = Config(loss_name='weight_softmax')
config2 = Config(model_name='densenet121',
                 loss_name='weight_softmax',
                 height=224,
                 width=224)

dataset = 'market1501std'
cur_path = os.getcwd()
logs_dir = os.path.join(cur_path, 'logs')
data_dir = os.path.join(cur_path, 'data', dataset)
data = datasets.create(dataset, data_dir)

soft_spaco([config1, config2], data, 5)