Example #1
    def build_model(self):
        self.p3d = C3D().cuda()
        self.load_model()

        self.gru = GRU(self.p3d).cuda()
        print("MODEL:")
        print(self.gru)
Example #2
    def __init__(self, embed_size):
        super(EncoderC3D, self).__init__()
        base_model = C3D().cuda()
        base_model.load_state_dict(torch.load(WEIGHT_PATH))

        self.conv1 = base_model.conv1
        self.pool1 = base_model.pool1

        self.conv2 = base_model.conv2
        self.pool2 = base_model.pool2

        self.conv3a = base_model.conv3a
        self.conv3b = base_model.conv3b
        self.pool3 = base_model.pool3

        self.conv4a = base_model.conv4a
        self.conv4b = base_model.conv4b
        self.pool4 = base_model.pool4

        self.conv5a = base_model.conv5a
        self.conv5b = base_model.conv5b
        self.pool5 = base_model.pool5

        self.fc6 = base_model.fc6
        self.fc7 = base_model.fc7
        self.fc8 = nn.Linear(4096, embed_size)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
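
The constructor above only wires the copied C3D layers together; a minimal sketch of the matching forward pass, assuming the standard C3D layer ordering and the usual (batch, 3, 16, 112, 112) clip input (this method is not part of the example itself):

    def forward(self, clips):
        # clips: (batch, 3, 16, 112, 112) video tensor, the usual C3D input shape
        h = self.pool1(self.relu(self.conv1(clips)))
        h = self.pool2(self.relu(self.conv2(h)))
        h = self.pool3(self.relu(self.conv3b(self.relu(self.conv3a(h)))))
        h = self.pool4(self.relu(self.conv4b(self.relu(self.conv4a(h)))))
        h = self.pool5(self.relu(self.conv5b(self.relu(self.conv5a(h)))))
        h = h.view(h.size(0), -1)  # flatten for fc6 (8192 features in the standard C3D)
        h = self.dropout(self.relu(self.fc6(h)))
        h = self.dropout(self.relu(self.fc7(h)))
        return self.bn(self.fc8(h))  # (batch, embed_size) embedding
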
Example #3
    def loading_model(self):

        print('Loading %s model' % (self.model_type))
        if self.model_type == 'C3D':
            self.model = C3D()
        elif self.model_type == 'I3D':
            self.model = I3D(num_classes=400, modality='rgb')
        else:
            self.model = P3D199(pretrained=False, num_classes=400, dropout=self.dropout)


        # Transfer classes
        self.model = transfer_model(model=self.model, model_type=self.model_type, num_classes=self.num_classes)

        # Check gpu and run parallel
        if check_gpu() > 0:
            self.model = torch.nn.DataParallel(self.model).cuda()

        # define loss function (criterion) and optimizer
        if check_gpu() > 0:
            self.criterion = nn.CrossEntropyLoss().cuda()
        else:
            self.criterion = nn.CrossEntropyLoss()

        policies = get_optim_policies(model=self.model, modality=self.modality, enable_pbn=True)

        self.optimizer = optim.SGD(policies, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)

        file = os.path.join(self.data_folder, 'model_best.pth.tar')
        if os.path.isfile(file):
            print("=> loading checkpoint '{}'".format('model_best.pth.tar'))

            checkpoint = torch.load(file)
            self.start_epoch = checkpoint['epoch']
            self.best_prec1 = checkpoint['best_prec1']
            self.model.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded model best ")
        else:
            print("=> no model best found at ")
            exit()

        cudnn.benchmark = True
Example #4
        train_phase(train_dataloader, optimizer, criterion, epoch)
        test_phase(test_dataloader)
        if (epoch + 1) % model_ckpt_interval == 0:  # save a checkpoint every model_ckpt_interval epochs
            save_model(model_lstm, 'model_my_lstm', epoch, saving_dir)

        # lr updates
        if (epoch + 1) % global_lr_stepsize == 0:
            learning_rate = learning_rate * global_lr_gamma
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate


if __name__ == '__main__':
    # loading the altered C3D (ie C3D upto before fc-6)
    model_CNN_pretrained_dict = torch.load('c3d.pickle')
    model_CNN = C3D()
    model_CNN_dict = model_CNN.state_dict()
    model_CNN_pretrained_dict = {
        k: v
        for k, v in model_CNN_pretrained_dict.items() if k in model_CNN_dict
    }
    model_CNN_dict.update(model_CNN_pretrained_dict)
    model_CNN.load_state_dict(model_CNN_dict)
    model_CNN = model_CNN.cuda()

    # lstm
    model_lstm = LSTM_anno()
    model_lstm = model_lstm.cuda()

    main()
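
The `__main__` block above keeps only the pretrained entries whose keys also exist in the freshly built C3D, so layers that were dropped from the architecture are simply skipped. A hedged sketch of the same key-filtering idiom as a reusable helper (the function name and signature are hypothetical, not part of the example):

import torch


def load_partial_weights(model, ckpt_path):
    # keep only checkpoint entries whose keys exist in the target model,
    # merge them into the model's own state_dict, then load the result
    pretrained = torch.load(ckpt_path)
    own_state = model.state_dict()
    filtered = {k: v for k, v in pretrained.items() if k in own_state}
    own_state.update(filtered)
    model.load_state_dict(own_state)
    return model
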
Example #5
    return -dist


# Parse the test-time arguments
args = parse_args()
# print(args)
# Load the test data
test_file = '/opt/data/private/DL_Workspace/fsv-baseline/datafile/{}/few-shot-test.txt'.format(
    args.dataset)
data_mgr = SetDataManager(args.n_way, args.k_shot, args.q_query,
                          args.test_episode)
loader = data_mgr.get_data_loader(test_file)
# Load the model
if args.model == 'C3D':
    model = C3D(
        num_classes=31, need_feature=True,
        pooling=args.pooling)  # num_classes is unused here, but it must be given so the keys match the saved model's parameters
elif args.model == 'FS_ResNet':
    model = FS_ResNet(args.k_shot, args.backbone_size)
elif args.model == 'FS_MENet':
    model = FS_MENet(args.k_shot, args.backbone_size)
else:
    raise NotImplementedError('no such a model!')
model = model.cuda()
check_dir = './chechpoints/FS_MENet34_hmdb51_SGD_lr_0.1_epi_True_epoch_100'
print(check_dir)
# best_file = get_best_file(check_dir)
# best_file = get_resume_file(check_dir)
best_file = get_assigned_file(check_dir, num=args.checkpoint)
tmp = torch.load(best_file)
Example #6
    val_dataset = SimpleVideoDataset(val_file, split='val', clip_len=8)
    tr_loader = DataLoader(tr_dataset,
                           batch_size=args.batch_size,
                           shuffle=True,
                           num_workers=4)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            num_workers=4)

# Build the model
if args.model == 'FS_ResNet':
    model = FS_ResNet(args.k_shot, args.backbone_size)
elif args.model == 'FS_MENet':
    model = FS_MENet(args.k_shot, args.backbone_size)
elif args.model == 'C3D':
    model = C3D(train_classes, pooling=args.pooling)
    args.backbone_size = ''
else:
    raise NotImplementedError("Not implement such a model")
model = model.cuda()
if args.optim == 'SGD':
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    # optimizer = optim.SGD([
    #     {'params': model.resnet.parameters(), 'lr': args.lr/10.0},
    #     {'params': model.menet.parameters(), 'lr': args.lr/10.0}
    # ])
elif args.optim == 'Adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
    optimizer = None
    raise NotImplementedError("Not support for this optim:{}".format(
Example #7
    DATA_TYPE, MODEL_TYPE, VIDEO_NAME, MODEL_NAME = opt.data_type, opt.model_type, opt.video_name, opt.model_name

    clip_len, resize_height, crop_size = utils.CLIP_LEN, utils.RESIZE_HEIGHT, utils.CROP_SIZE
    class_names = utils.get_labels(DATA_TYPE)

    DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    if '{}_{}.pth'.format(DATA_TYPE, MODEL_TYPE) != MODEL_NAME:
        raise NotImplementedError(
            'the model name must match the model type and data type')

    if MODEL_TYPE == 'r2plus1d':
        model = R2Plus1D(len(class_names), (2, 2, 2, 2))
    else:
        model = C3D(len(class_names))

    checkpoint = torch.load('epochs/{}'.format(MODEL_NAME),
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint)
    model = model.to(DEVICE).eval()

    # read video
    cap, retaining, clips = cv2.VideoCapture(VIDEO_NAME), True, []
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    while retaining:
        retaining, frame = cap.read()
        if not retaining and frame is None:
            continue
        resize_width = math.floor(frame.shape[1] / frame.shape[0] *
                                  resize_height)
        # make sure it can be cropped correctly
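
The snippet is cut off after this comment; a hedged sketch of the step it points at, resizing so the height becomes resize_height with the aspect ratio preserved and then taking a centered crop_size crop (an assumption, not the repo's actual continuation):

        frame = cv2.resize(frame, (resize_width, resize_height))
        # center-crop to crop_size x crop_size so every frame has a fixed spatial size
        top = (resize_height - crop_size) // 2
        left = (resize_width - crop_size) // 2
        clips.append(frame[top:top + crop_size, left:left + crop_size, :])
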
Example #8
    def loading_model(self):

        print('Loading %s model' % (self.model_type))

        if self.model_type == 'C3D':
            self.model = C3D()
            if self.pretrained:
                self.model.load_state_dict(torch.load('c3d.pickle'))
        elif self.model_type == 'I3D':
            if self.pretrained:
                self.model = I3D(num_classes=400, modality='rgb')
                self.model.load_state_dict(
                    torch.load('kinetics_i3d_model_rgb.pth'))
            else:
                self.model = I3D(num_classes=self.num_classes, modality='rgb')
        else:
            if self.pretrained:
                print("=> using pre-trained model")
                self.model = P3D199(pretrained=True,
                                    num_classes=400,
                                    dropout=self.dropout)

            else:
                print("=> creating model P3D")
                self.model = P3D199(pretrained=False,
                                    num_classes=400,
                                    dropout=self.dropout)
        # Transfer classes
        self.model = transfer_model(model=self.model,
                                    model_type=self.model_type,
                                    num_classes=self.num_classes)

        # Check gpu and run parallel
        if check_gpu() > 0:
            self.model = torch.nn.DataParallel(self.model).cuda()

        # define loss function (criterion) and optimizer
        self.criterion = nn.CrossEntropyLoss()

        if check_gpu() > 0:
            self.criterion = nn.CrossEntropyLoss().cuda()

        params = self.model.parameters()
        if self.model_type == 'P3D':
            params = get_optim_policies(model=self.model,
                                        modality=self.modality,
                                        enable_pbn=True)

        self.optimizer = optim.SGD(params=params,
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        # self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer, mode='min', patience=10, verbose=True)

        # optionally resume from a checkpoint
        if self.resume:
            if os.path.isfile(self.resume):
                print("=> loading checkpoint '{}'".format(self.resume))
                checkpoint = torch.load(self.resume)
                self.start_epoch = checkpoint['epoch']
                self.best_prec1 = checkpoint['best_prec1']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    self.evaluate, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(self.resume))

        if self.evaluate:
            file_model_best = os.path.join(self.data_folder,
                                           'model_best.pth.tar')
            if os.path.isfile(file_model_best):
                print(
                    "=> loading checkpoint '{}'".format('model_best.pth.tar'))
                checkpoint = torch.load(file_model_best)
                self.start_epoch = checkpoint['epoch']
                self.best_prec1 = checkpoint['best_prec1']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    self.evaluate, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(self.resume))

        cudnn.benchmark = True
Example #9
if model_choice == 'baby':
    from models.BabyC3D import BabyC3D

    # from models.BabyC3D import Crazy

    model = BabyC3D()
    # model = Crazy()
elif model_choice == 'small':
    from models.SmallC3D import SmallC3D

    model = SmallC3D()
elif model_choice == 'se3cnn':
    from models.Se3cnn import Se3cnn

    model = Se3cnn()
elif model_choice == 'c3d':
    from models.C3D import C3D

    model = C3D()
elif model_choice == 'small_siamese':
    from models.Siamese import SmallSiamese

    model = SmallSiamese()
elif model_choice == 'baby_siamese':
    from models.Siamese import BabySiamese

    model = BabySiamese()
elif model_choice == 'babyse3cnn':
    from models.BabySe3cnn import BabySe3cnn

    model = BabySe3cnn()
else:
    # Not possible because of argparse
    raise ValueError('Not a possible model')
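
The same dispatch could be written as a small registry of lazily imported constructors instead of an elif chain; a sketch using the module paths from the example (the registry and helper are hypothetical, not part of the original code):

import importlib

MODEL_REGISTRY = {
    'baby': ('models.BabyC3D', 'BabyC3D'),
    'small': ('models.SmallC3D', 'SmallC3D'),
    'se3cnn': ('models.Se3cnn', 'Se3cnn'),
    'c3d': ('models.C3D', 'C3D'),
    'small_siamese': ('models.Siamese', 'SmallSiamese'),
    'baby_siamese': ('models.Siamese', 'BabySiamese'),
    'babyse3cnn': ('models.BabySe3cnn', 'BabySe3cnn'),
}


def build_model(model_choice):
    # import the chosen module lazily, mirroring the per-branch imports above
    if model_choice not in MODEL_REGISTRY:
        raise ValueError('Not a possible model')
    module_name, class_name = MODEL_REGISTRY[model_choice]
    return getattr(importlib.import_module(module_name), class_name)()
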
Example #10
        'val_top5_accuracy': [],
        'test_loss': [],
        'test_top1_accuracy': [],
        'test_top5_accuracy': []
    }
    # record best val accuracy
    best_accuracy = 0

    train_loader, val_loader, test_loader = utils.load_data(
        DATA_TYPE, BATCH_SIZE)
    NUM_CLASS = len(train_loader.dataset.label2index)

    if MODEL_TYPE == 'r2plus1d':
        model = R2Plus1D(NUM_CLASS, (2, 2, 2, 2))
    else:
        model = C3D(NUM_CLASS)

    if PRE_TRAIN is not None:
        checkpoint = torch.load('epochs/{}'.format(PRE_TRAIN),
                                map_location=lambda storage, loc: storage)
        # load pre-trained model which trained on the same dataset
        if DATA_TYPE in PRE_TRAIN:
            # load same type pre-trained model
            if PRE_TRAIN.split('.')[0].split('_')[1] == MODEL_TYPE:
                model.load_state_dict(checkpoint)
            else:
                raise NotImplementedError(
                    'the pre-trained model must be the same model type')
        # warm-start the model by loading weights trained on another dataset, then fine-tune
        else:
            if PRE_TRAIN.split('.')[0].split('_')[1] == MODEL_TYPE:
Example #11
def make_predictions(model_choice, model_name, loader):
    """
    Make the prediction for a class of model, trained in a file named model_name
    Saves a dict 'path like 1a0g_PMP_0.pdb.npy : predicted 128 embedding' in predictions/model_name
    :param model_choice:
    :param model_name:
    :return:
    """

    torch.multiprocessing.set_sharing_strategy('file_system')
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # I made a mistake in the saving script
    model_path = os.path.join('../trained_models', model_name,
                              model_name + '.pth')

    if model_choice == 'baby':
        from models.BabyC3D import BabyC3D

        # from models.BabyC3D import Crazy

        model = BabyC3D()
        # model = Crazy()
    elif model_choice == 'small':
        from models.SmallC3D import SmallC3D

        model = SmallC3D()
    elif model_choice == 'se3cnn':
        from models.Se3cnn import Se3cnn

        model = Se3cnn()
    elif model_choice == 'c3d':
        from models.C3D import C3D

        model = C3D()
    elif model_choice == 'small_siamese':
        from models.Siamese import SmallSiamese

        model = SmallSiamese()
    elif model_choice == 'baby_siamese':
        from models.Siamese import BabySiamese

        model = BabySiamese()
    elif model_choice == 'babyse3cnn':
        from models.BabySe3cnn import BabySe3cnn

        model = BabySe3cnn()
    else:
        # Not possible because of argparse
        raise ValueError('Not a possible model')
    model.to(device)
    model = torch.nn.DataParallel(model)

    # import torch.optim as optim
    # optimizer = optim.Adam(None)
    # print(model, model_path)

    dict_results = run_model(loader, model, model_path)
    # use a context manager so the output file is closed after writing
    with open(f'../data/post_processing/predictions/{model_name}.p', 'wb') as handle:
        pickle.dump(dict_results, handle)
    return dict_results