Example #1
def main():
    args = parse_args()

    config = Config(args.config)
    device = get_device(config.device)

    if config.seed is not None:
        torch.manual_seed(config.seed)
        np.random.seed(config.seed)

    scale = get_scaler(config.scale)

    model = get_model(input_shape=30, model_config=config.model)
    model.to_device(device)

    if args.t:
        data = Dataset.read_csv(args.data_train, header=None)
        print(model)
        train(data, model, scale, config, device)
        model.save(args.weights, scale)

    if args.e:
        assert args.weights.exists(), "{} - does not exist".format(
            args.weights)
        data = Dataset.read_csv(args.data_test, header=None)
        print(model)
        model.load(args.weights, scale, device)
        evaluate(data, model, scale, device)
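
parse_args is not shown; a plausible reconstruction (a sketch only: every flag name below is an assumption inferred from the attributes main() reads) might look like:

import argparse
from pathlib import Path

def parse_args():
    # Hypothetical CLI matching the attribute accesses above: args.config,
    # args.t, args.e, args.data_train, args.data_test, args.weights.
    p = argparse.ArgumentParser()
    p.add_argument("--config", type=Path, required=True, help="path to the config file")
    p.add_argument("-t", action="store_true", help="run training")
    p.add_argument("-e", action="store_true", help="run evaluation")
    p.add_argument("--data-train", type=Path, help="training CSV (read with header=None)")
    p.add_argument("--data-test", type=Path, help="test CSV (read with header=None)")
    p.add_argument("--weights", type=Path, default=Path("weights.pt"), help="weights file")
    return p.parse_args()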
Example #2
def drop_na(data: Dataset) -> Dataset:
    data = data.copy()
    tmp_concat = pd.concat([data.X, pd.DataFrame(data.y, columns=["_TARGET_"])], axis=1)
    tmp_concat.dropna(inplace=True)
    tmp_concat.reset_index(drop=True, inplace=True)
    data.X = tmp_concat.drop(columns=["_TARGET_"]).copy()
    data.y = tmp_concat["_TARGET_"].copy().to_numpy().ravel()
    return data
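
A minimal usage sketch of drop_na, assuming only that Dataset exposes X (a DataFrame), y (an array) and copy(); a tiny stand-in class is used here instead of the real Dataset:

import numpy as np
import pandas as pd
from dataclasses import dataclass

@dataclass
class ToyDataset:
    # Stand-in for the project's Dataset: just the attributes drop_na touches.
    X: pd.DataFrame
    y: np.ndarray

    def copy(self):
        return ToyDataset(self.X.copy(), self.y.copy())

data = ToyDataset(X=pd.DataFrame({"a": [1.0, np.nan, 3.0]}), y=np.array([0, 1, 0]))
clean = drop_na(data)
print(clean.X)  # the row containing NaN is dropped and the index reset
print(clean.y)  # array([0, 0])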
Example #3
def main():

    par = Params(sys.argv)
    random.seed(par.seed)
    torch.manual_seed(par.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed_all(par.seed)

    if par.trn and par.val:
        chk = Checkpoint(par.dir)

        if chk.contains_model:  ####### resume training ####################################
            cfg, mod, opt = chk.load(par)  ### also moves to GPU if cfg.cuda
            #            cfg.update_par(par) ### updates par in cfg
            print_time('Learning [resume It={}]...'.format(cfg.n_iters_sofar))

        else:  ######################## training from scratch ##############################
            cfg = Config(par)  ### reads cfg and par (reads vocabularies)
            mod = Model(cfg)
            if cfg.cuda: mod.cuda()  ### moves to GPU
            opt = Optimizer(cfg, mod)  #build Optimizer
            print_time('Learning [from scratch]...')

        trn = Dataset(par.trn,
                      cfg.svoc,
                      cfg.tvoc,
                      par.batch_size,
                      par.max_src_len,
                      par.max_tgt_len,
                      do_shuffle=True,
                      do_filter=True,
                      is_test=False)
        val = Dataset(par.val,
                      cfg.svoc,
                      cfg.tvoc,
                      par.batch_size,
                      par.max_src_len,
                      par.max_tgt_len,
                      do_shuffle=True,
                      do_filter=True,
                      is_test=True)
        Training(cfg, mod, opt, trn, val, chk)

    elif par.tst:  #################### inference ##########################################
        chk = Checkpoint()
        cfg, mod, opt = chk.load(par, par.chk)
        #        cfg.update_par(par) ### updates cfg options with pars
        tst = Dataset(par.tst,
                      cfg.svoc,
                      cfg.tvoc,
                      par.batch_size,
                      0,
                      0,
                      do_shuffle=False,
                      do_filter=False,
                      is_test=True)
        print_time('Inference [model It={}]...'.format(cfg.n_iters_sofar))
        Inference(cfg, mod, tst)
Example #4
def nltk_movie_review_accuracy(num_iterations):
    """ Try different number of features, and optimize number of training iterations."""
    return 0, 0  # TODO: Exercise 4: remove line
    (training_documents, dev_documents, test_documents) = load_reviews()

    best_development_accuracy = 0.0
    best_train_accuracy = 0.0
    best_num_features = 0
    best_classifier = None
    best_feature_set = None

    # Test different numbers of features.
    for n in [100, 1000, 10000]:
        print("Training with %d features..." % n)
        # Training set
        training_set = Dataset.from_document_collection(training_documents,
                                                        num_features=n)
        # Development set
        development_set = Dataset.from_document_collection(
            dev_documents, feature_set=training_set.feature_set)

        # Train classifier
        classifier = PerceptronClassifier.from_dataset(training_set)
        pass  # TODO: Exercise 4: train the classifier

        # Accuracies of classifier with n features
        train_accuracy = classifier.test_accuracy(training_set)
        development_accuracy = classifier.test_accuracy(development_set)

        if development_accuracy > best_development_accuracy:
            best_development_accuracy = development_accuracy
            best_train_accuracy = train_accuracy
            best_num_features = n
            best_classifier = classifier.copy()
            best_feature_set = training_set.feature_set

    print(
        "Best classifier with %d features: \t Train Accuracy: %.4f \t Dev Accuracy: %.4f"
        % (best_num_features, best_train_accuracy, best_development_accuracy))
    print("Best number of features: %d " % best_num_features)
    print("Top features for positive class:")
    print(best_classifier.features_for_class(True))
    print("Top features for negative class:")
    print(best_classifier.features_for_class(False))

    # Compute test score for best setting.
    testing_set = Dataset.from_document_collection(
        test_documents, feature_set=best_feature_set)
    testing_accuracy = best_classifier.test_accuracy(testing_set)
    print("Test score for best setting: %.4f" % testing_accuracy)
    return best_development_accuracy, testing_accuracy
Example #5
def testKnee():
    lr = args.lr
    acc = args.rate
    batch_size = args.batch_size
    epochs = args.epochs
    mask_type = args.mask
    bn = args.bn
    w = args.w
    model_type = args.model
    data_type = args.data

    test_val = True

    if bn:
        path = "./params/%s_bn_brain_%s_%d.pkl" % (model_type, mask_type, acc)
    else:
        # path = "./params/%s_%s_%s_%d_6.pkl" % (model_type, data_type,mask_type, acc)
        path = "./params/%s_knee_%s_%d.pkl" % (model_type, mask_type, acc)
    print(path)
    # mask = np.load("./mask/%s/%s/%d.npy" % (data_type, mask_type, acc))
    # mask = np.load("./mask/%s/%s/256_256_%d.npy" % (data_type, mask_type, acc))

    # mask = sio.loadmat("./mask/knee/%s/mask_512_512_%d.mat" % (mask_type, acc))['Umask']
    mask = sio.loadmat("./mask/brain/%s/%s_256_256_%d.mat" %
                       (mask_type, mask_type, acc))['Umask']
    mask = np.transpose(mask, (1, 0))
    # mask = np.pad(mask, 128, 'constant')
    mask = fftshift(mask, axes=(-2, -1))
    mask_torch = torch.from_numpy(mask).float().cuda()

    model = MRIReconstruction(mask_torch, w, bn).cuda()

    from utils.test_data import KneeDataset as Dataset
    dataset = Dataset(mask_torch, ["./output/selected/selected.npy"])
    dataloader = DataLoader(dataset, batch_size=6, shuffle=False)

    if os.path.exists(path):
        model.load_state_dict(torch.load(path))
        print("Finished load model parameters!")

    print(lr, acc, batch_size, epochs)

    import time

    for data in dataloader:
        u_img, u_k, f_img, f_k = create_input(*data)
        start = time.time()

        result = model(*(u_k, u_img))
        end = time.time()
        print((end - start) / 6)
        result = result.cpu().detach().numpy()
        c, _, h, w = result.shape

        res = np.zeros((c, h, w), dtype=np.complex128)  # np.complex was removed in NumPy 1.24

        res.real = result[:, 0]
        res.imag = result[:, 1]
        print(res.shape)
        np.save("./output/result_%s_%d_ours.npy" % (mask_type, acc), res)
        print("INFO: Saved result . size: ", res.shape)
Example #6
    def setUp(self):
        small_collection_train_1 = DocumentCollection.from_document_list(
            train_docs_1)
        self.small_dataset_train_1 = Dataset.from_document_collection(
            small_collection_train_1)

        small_collection_train_2 = DocumentCollection.from_document_list(
            train_docs_2)
        self.small_dataset_train_2 = Dataset.from_document_collection(
            small_collection_train_2)

        small_collection_dev = DocumentCollection.from_document_list(dev_docs)
        self.small_dataset_dev = Dataset.from_document_collection(
            small_collection_dev,
            feature_set=self.small_dataset_train_1.feature_set)

        small_collection_pred_1 = DocumentCollection.from_document_list(
            pred_docs_1)
        self.small_dataset_pred_test_1 = Dataset.from_document_collection(
            small_collection_pred_1,
            feature_set=self.small_dataset_train_1.feature_set)

        small_collection_pred_2 = DocumentCollection.from_document_list(
            pred_docs_2)
        self.small_dataset_pred_test_2 = Dataset.from_document_collection(
            small_collection_pred_2,
            feature_set=self.small_dataset_train_1.feature_set)

        small_collection_no_update = DocumentCollection.from_document_list(
            no_update_docs)
        self.small_instance_list_no_update = [
            DataInstance.from_document(doc,
                                       self.small_dataset_train_1.feature_set)
            for doc in small_collection_no_update.all_documents()
        ]

        small_collection_do_update = DocumentCollection.from_document_list(
            do_update_docs)
        self.small_instance_list_do_update = [
            DataInstance.from_document(doc,
                                       self.small_dataset_train_1.feature_set)
            for doc in small_collection_do_update.all_documents()
        ]
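
These fixtures repeat one collection-to-dataset pattern; a small helper (a sketch against the same DocumentCollection/Dataset API, not part of the original suite) would shrink setUp without changing what it builds:

def _dataset_from(docs, feature_set=None):
    # Wrap a document list in a DocumentCollection, then a Dataset, optionally
    # pinning an existing feature set so feature indices stay aligned.
    collection = DocumentCollection.from_document_list(docs)
    if feature_set is None:
        return Dataset.from_document_collection(collection)
    return Dataset.from_document_collection(collection, feature_set=feature_set)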
Example #7
def main(args):
    dataset = Dataset(args.data_path, args.offset_x, args.offset_y,
                      args.batch_size, args.batch_per_video)
    optimizer = keras.optimizers.Adam(lr=1e-4)
    model = ConvLSTM(optimizer, args.init_channel, args.block_num)

    if args.train == 'train':
        dataloader = dataset.train_loader()
        train(dataloader, model, args.epochs, args.steps_per_epoch,
              args.save_path)
        utils.save_model(model, args.save_path)
        x, y = next(dataloader)
        pred = model.predict(x)
        utils.make_image(pred, y)

    elif args.train == 'test':
        video_idx = int(input('Enter the index of the video to predict.'))
        x, y = dataset.test_loader(video_idx)
        model = utils.load_model(model, args.save_path)
        pred = test(model, x, y, args.batch_size)
        abnormal, score = anomaly_score(pred, y)
        plt.plot(score)
        plt.savefig('anomaly score.png')
        utils.make_video(pred, abnormal)
Example #8
def get_data(name, split_id, data_dir, height, width, batch_size, workers):
    root = osp.join(data_dir, name)
    dataset = Dataset(root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_set = dataset.train
    num_classes = dataset.num_class

    train_transformer_img = T.Compose([
        # T.Resize((height, width)),
        T.ToTensor(),
        # normalizer,
    ])

    test_transformer_img = T.Compose([
        # T.Resize((height, width)),
        # T.RectScale(height, width),
        T.ToTensor(),
        # normalizer,
    ])

    train_loader = DataLoader(Preprocessor(
        train_set,
        root=dataset.images_dir,
        transform_img=train_transformer_img),
                              batch_size=batch_size,
                              num_workers=workers,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(Preprocessor(dataset.val,
                                         root=dataset.images_dir,
                                         transform_img=test_transformer_img),
                            batch_size=batch_size,
                            num_workers=workers,
                            shuffle=False,
                            pin_memory=True)

    return dataset, num_classes, train_loader, val_loader
Example #9
import datetime

from utils.config import Config
from model.fasterrcnn import FasterRCNNTrainer, FasterRCNN
import tensorflow as tf
from utils.data import Dataset

physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)

config = Config()
config._parse({})

print("读取数据中....")
dataset = Dataset(config)

frcnn = FasterRCNN(21, (7, 7))
print('model construct completed')
"""
feature_map, rpn_locs, rpn_scores, rois, roi_indices, anchor = frcnn.rpn(x, scale)
'''
feature_map : (1, 38, 50, 256) max= 0.0578503
rpn_locs    : (1, 38, 50, 36) max= 0.058497224
rpn_scores  : (1, 17100, 2) max= 0.047915094
rois        : (2000, 4) max= 791.0
roi_indices :(2000,) max= 0
anchor      : (17100, 4) max= 1154.0387
'''
bbox = bboxes
label = labels
Example #10
def test_imputation(data, completer_func=None, multi=False, verboseID=""):
    """Cross-validate several classifiers on data imputed by completer_func.

    data: a Dataset object.
    completer_func: the imputation function (if None, the data is used as-is).
    multi: whether completer_func is a multiple-imputation method.
    verboseID: display name of completer_func, used in log messages.
    """
    global PARAMS
    global N_SPLITS
    clfs = { # define all the classifiers with best parameters
        "KNN": KNeighborsClassifier(n_neighbors=PARAMS["KNN"]["n_neighbors"], leaf_size=PARAMS["KNN"]["leaf_size"]),
        "LinearSVC": LinearSVC(dual=False, tol=PARAMS["LinearSVC"]["tol"], C=PARAMS["LinearSVC"]["C"], max_iter=PARAMS["LinearSVC"]["max_iter"]),
        # "SVC": SVC(tol=PARAMS["SVC"]["tol"], C=PARAMS["SVC"]["C"], max_iter=PARAMS["SVC"]["max_iter"]),
        "Forest": RandomForestClassifier(n_estimators=PARAMS["Forest"]["n_estimators"], max_depth=PARAMS["Forest"]["max_depth"], min_samples_leaf=PARAMS["Forest"]["min_samples_leaf"]),
        "LogReg": LogisticRegression(tol=PARAMS["LogReg"]["tol"], C=PARAMS["LogReg"]["C"], max_iter=PARAMS["LogReg"]["max_iter"]),
        "Tree": DecisionTreeClassifier(max_depth=PARAMS["Tree"]["max_depth"], max_leaf_nodes=PARAMS["Tree"]["max_leaf_nodes"], min_samples_leaf=PARAMS["Tree"]["min_samples_leaf"]),
        "MLP": MLPClassifier(alpha=PARAMS["MLP"]["alpha"], learning_rate_init=PARAMS["MLP"]["learning_rate_init"], max_iter=PARAMS["MLP"]["max_iter"], hidden_layer_sizes=PARAMS["MLP"]["hidden_layer_sizes"], early_stopping=True, n_iter_no_change=5),
    }
    rawdata_cv = {  # per classifier, the raw confusion-matrix output of each CV fold
        "KNN": [],
        "LinearSVC": [],
        # "SVC": [],
        "Forest": [],
        "LogReg": [],
        "Tree": [],
        "MLP": [],
    }
    kf = StratifiedShuffleSplit(n_splits=N_SPLITS)
    fold = 1
    X = data.X
    y = data.y
    for train_idx, test_idx in kf.split(X, y):
        X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]
        y_train, y_test = y[train_idx], y[test_idx]
        X_test = X_test.reset_index(drop=True)
        X_train = X_train.reset_index(drop=True)
        if completer_func:
            # do imputations on the training set and test set individually
            data_incomplete = Dataset("tmp", X_train, y_train, types=data.types, 
                protected_features=data.protected_features, categorical_features=data.categorical_features,
                encoders=[data.X_encoders, data.y_encoder])
            try:
                data_complete = completer_func(data_incomplete)
            except Exception as e:
                # catch exception and skip
                print("Exception occurred in completer function '{}': {}".format(verboseID, e))
                for clf_name in clfs.keys():
                    rawdata_cv[clf_name].append([])
                fold += 1
                continue
            if ((not multi) and data_complete.X.isnull().sum().sum() > 0) or (multi and sum([dd.X.isnull().sum().sum() for dd in data_complete]) > 0):
                # if completed dataset still contains missing values, skip
                print("Completer function '{}' produces missing values, skipped".format(verboseID))
                for clf_name in clfs.keys():
                    rawdata_cv[clf_name].append([])
                fold += 1
                continue
            # apply one-hot-encoding
            if multi:
                _ = [m.preprocess() for m in data_complete]
            else:
                data_complete.preprocess()
            X_train = [m.X_encoded.copy() for m in data_complete] if multi else data_complete.X_encoded.copy()
            y_train = data_complete[0].y.copy() if multi else data_complete.y.copy()

            data_incomplete = Dataset("tmp", X_test, y_test, types=data.types, 
                protected_features=data.protected_features, categorical_features=data.categorical_features,
                encoders=[data.X_encoders, data.y_encoder])
            try:
                data_complete = completer_func(data_incomplete)
            except Exception as e:
                print("Exception occurred in completer function '{}': {}".format(verboseID, e))
                for clf_name in clfs.keys():
                    rawdata_cv[clf_name].append([])
                fold += 1
                continue
            if ((not multi) and data_complete.X.isnull().sum().sum() > 0) or (multi and sum([dd.X.isnull().sum().sum() for dd in data_complete]) > 0):
                print("Completer function '{}' produces missing values, skipped".format(verboseID))
                for clf_name in clfs.keys():
                    rawdata_cv[clf_name].append([])
                fold += 1
                continue
            # apply one-hot-encoding
            if multi:
                _ = [m.preprocess() for m in data_complete]
            else:
                data_complete.preprocess()
            X_test = [m.X_encoded.copy() for m in data_complete] if multi else data_complete.X_encoded.copy()
            y_test = data_complete[0].y.copy() if multi else data_complete.y.copy()
        if not completer_func: multi = False
        # get result for each classifier
        for clf_name, clf in clfs.items():
            result = compute_confusion_matrix(X_train, y_train, X_test, y_test, clf, data.protected_features, multi=multi)
            rawdata_cv[clf_name].append(result)
        fold += 1
    return rawdata_cv
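
Since rawdata_cv mirrors the keys of clfs one-to-one, it could also be derived instead of written out (a minor refactor sketch):

rawdata_cv = {clf_name: [] for clf_name in clfs}  # one result list per classifier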
Example #11
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval):
    root = osp.join(data_dir, name)
    dataset = Dataset(root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    num_classes = dataset.num_class

    if name == "GTOS_256":
        train_transformer_img = T.Compose([
            # T.Resize((height, width)),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            # T.Resize((height, width)),
            # T.RectScale(height, width),
            T.ToTensor(),
            # normalizer,
        ])

    if name == "CDMS_174":
        train_transformer_img = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

    if name == "CDMS_160":
        train_transformer_img = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

    train_loader = DataLoader(Preprocessor(
        dataset.train,
        root=dataset.images_dir,
        dataset_name=name,
        transform_img=train_transformer_img),
                              batch_size=batch_size,
                              num_workers=workers,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(Preprocessor(dataset.val,
                                         root=dataset.images_dir,
                                         dataset_name=name,
                                         transform_img=test_transformer_img),
                            batch_size=batch_size,
                            num_workers=workers,
                            shuffle=False,
                            pin_memory=True)

    test_loader = DataLoader(Preprocessor(dataset.test,
                                          root=dataset.images_dir,
                                          dataset_name=name,
                                          transform_img=test_transformer_img),
                             batch_size=batch_size,
                             num_workers=workers,
                             shuffle=False,
                             pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader
Example #12
def main():
    PROJECT_PATH = "/rapids/notebooks/host/BM_GPU"
    config_path = f"{PROJECT_PATH}/config_aicrowd.yaml"
    Data = Dataset(PROJECT_PATH, 'task1_etho', config_path)
    Data.load_data()

    # configuration
    INFO = Data.info
    INFO_items = list(INFO.items())
    INFO_items.sort(key=lambda x: x[1]['order'])
    config = Data.config

    with open (f"{PROJECT_PATH}/{config['result_path']}/angle_scale_model.pickle", 'rb') as file:
        angle_scaler = pickle.load(file)
    with open (f"{PROJECT_PATH}/{config['result_path']}/limb_scale_model.pickle", 'rb') as file:
        limb_scaler = pickle.load(file)

    # features
    tot_bp = Data.data_obj['rotated_bodypoints']*config['postural_weight']
    tot_angle = angle_scaler.transform(Data.data_obj['angles'])*config['postural_weight']
    tot_limb = limb_scaler.transform(Data.data_obj['limbs'])*config['postural_weight']
    tot_marker_pwr = None # Implement this later maybe
    tot_angle_pwr = Data.data_obj['angle_power']*config['kinematic_weight']
    tot_limb_pwr = Data.data_obj['limb_power']*config['kinematic_weight']

    # Postural Embedding
    if config['include_marker_postural']:
        start_timer = time.time()
        print(f"::: Marker Postural ::: START")
        p_marker_embed(config, INFO_items, tot_bp)
        print(f"::: Marker Postural ::: Computation Time: {time.time()-start_timer}")
    if config['include_angle_postural']:
        start_timer = time.time()
        print(f"::: Angle Postural ::: START")
        p_angle_embed(config, INFO_items, tot_angle)
        print(f"::: Angle Postural  ::: Computation Time: {time.time()-start_timer}")
    if config['include_limb_postural']:
        start_timer = time.time()
        print(f"::: Limb Postural  ::: START")
        p_limb_embed(config, INFO_items, tot_limb)
        print(f"::: Limb Postural  ::: Computation Time: {time.time()-start_timer}")
    if config['include_all_postural']:
        start_timer = time.time()
        print(f"::: Angle & Limb Postural ::: START Timer")
        p_angle_limb_embed(config, INFO_items, tot_angle, tot_limb)
        print(f"::: Angle & Limb Postural ::: Time Stamp: {time.time()-start_timer}")

    # Kinematic Embedding
    if config['include_marker_kinematic']:
        start_timer = time.time()
        print(f"::: Marker Kinematic ::: START")
        k_marker_embed(config, INFO_items, tot_marker_pwr)
        print(f"::: Marker Kinematic ::: Computation Time: {time.time()-start_timer}")
    if config['include_angle_kinematic']:
        start_timer = time.time()
        print(f"::: Angle Kinematic ::: START")
        k_angle_embed(config, INFO_items, tot_angle_pwr)
        print(f"::: Angle Kinematic ::: Computation Time: {time.time()-start_timer}")
    if config['include_limb_kinematic']:
        start_timer = time.time()
        print(f"::: Limb Kinematic ::: START")
        k_limb_embed(config, INFO_items, tot_limb_pwr)
        print(f"::: Limb Kinematic ::: Computation Time: {time.time()-start_timer}")
    if config['include_all_kinematic']:
        start_timer = time.time()
        print(f"::: Angle & Limb Kinematic ::: START Timer")
        k_angle_limb_embed(config, INFO_items, tot_angle_pwr, tot_limb_pwr)
        print(f"::: Angle & Limb Kinematic ::: Time Stamp: {time.time()-start_timer}")

    if config['include_all_features']:
        start_timer = time.time()
        print(f"::: Angle & Limb Kinematic & Postural ::: START Timer")
        kp_angle_limb_embed(config, INFO_items, tot_angle, tot_limb, tot_angle_pwr, tot_limb_pwr)
        print(f"::: Angle & Limb Kinematic & Postural ::: Time Stamp: {time.time()-start_timer}")
Example #13
def concat(data: Dataset) -> pd.DataFrame:
    data = data.copy()
    return pd.concat([data.X, pd.DataFrame(data.y, columns=["_TARGET_"])], axis=1)
Example #14
def get_data(name, split_id, data_dir, height, width, batch_size, workers,
             combine_trainval):
    root = osp.join(data_dir, name)
    dataset = Dataset(root, split_id=split_id)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_class

    if name == "GTOS_256":

        train_transformer_img = T.Compose([
            # T.Resize((height, width)),
            T.ToTensor(),
            # normalizer,
        ])

        train_transformer_diff = T.Compose([
            T.Resize((240, 240)),
            # T.Grayscale(num_output_channels=3),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            # T.Resize((height, width)),
            # T.RectScale(height, width),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_diff = T.Compose([
            T.Resize((240, 240)),
            # T.RectScale(height, width),
            # T.Grayscale(num_output_channels=3),
            T.ToTensor(),
            # normalizer,
        ])

    if name == "CDMS_174":
        train_transformer_img = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        train_transformer_diff = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_diff = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

    if name == "CDMS_160":
        train_transformer_img = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        train_transformer_diff = T.Compose([
            T.RandomCrop(256),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_img = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

        test_transformer_diff = T.Compose([
            T.CenterCrop(256),
            T.ToTensor(),
            # normalizer,
        ])

    # a = Preprocessor(train_set, root=dataset.images_dir,
    #              transform_img=train_transformer_img, transform_diff=train_transformer_diff)
    # p = a[800]
    # import cv2
    # img = p[0]
    # img = img.numpy()
    # img = (np.transpose(img, (1, 2, 0)) * 255).astype(np.uint8)
    # cv2.imwrite('/Users/jason/Documents/GitHub/DAIN_py/tmp/img.png', img)
    # diff = p[1]
    # diff = diff.numpy()
    # diff = (np.transpose(diff, (1, 2, 0)) * 255).astype(np.uint8)
    # cv2.imwrite('/Users/jason/Documents/GitHub/DAIN_py/tmp/diff.png', diff)

    train_loader = DataLoader(Preprocessor(
        dataset.train,
        root=dataset.images_dir,
        dataset_name=name,
        transform_img=train_transformer_img,
        transform_diff=train_transformer_diff),
                              batch_size=batch_size,
                              num_workers=workers,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(Preprocessor(dataset.val,
                                         root=dataset.images_dir,
                                         dataset_name=name,
                                         transform_img=test_transformer_img,
                                         transform_diff=test_transformer_diff),
                            batch_size=batch_size,
                            num_workers=workers,
                            shuffle=False,
                            pin_memory=True)

    test_loader = DataLoader(Preprocessor(
        dataset.test,
        root=dataset.images_dir,
        dataset_name=name,
        transform_img=test_transformer_img,
        transform_diff=test_transformer_diff),
                             batch_size=batch_size,
                             num_workers=workers,
                             shuffle=False,
                             pin_memory=True)

    return dataset, num_classes, train_loader, val_loader, test_loader
Example #15
def train_knee():
    from utils.data import KneeDataset as Dataset
    lr = args.lr
    acc = args.rate
    batch_size = args.batch_size
    epochs = args.epochs
    mask_type = args.mask
    bn = args.bn
    w = args.w
    model_type = args.model
    data_type = args.data
    cuda = args.cuda

    test_val = True

    if bn:
        path = "./params/%s_bn_%s_%s_%d.pkl" % (model_type, data_type,
                                                mask_type, acc)
    else:
        # path = "./params/%s_%s_%s_%d_6.pkl" % (model_type, data_type,mask_type, acc)
        path = "./params/%s_%s_%s_%d.pkl" % (model_type, data_type, mask_type,
                                             acc)
    print(path)
    # mask = np.load("./mask/%s/%s/%d.npy" % (data_type, mask_type, acc))
    # mask = np.load("./mask/%s/%s/256_256_%d.npy" % (data_type, mask_type, acc))

    # mask = sio.loadmat("./mask/%s/%s/%s_320_320_%d.mat" % (data_type, mask_type, mask_type, acc))['Umask']
    # mask = np.transpose(mask)
    # mask = fftshift(mask, axes=(-2, -1))
    # mask_torch = torch.from_numpy(mask).float().cuda()

    mask = sio.loadmat("./mask/brain/%s/%s_256_256_%d.mat" %
                       (mask_type, mask_type, acc))['Umask']
    mask = np.transpose(mask, (1, 0))
    # print("./mask/brain/%s/%s_256_256_%d.mat" % (mask_type, mask_type, acc))

    # import matplotlib.pyplot as plt

    # plt.imshow(mask, cmap="gray")
    # plt.show()

    mask = fftshift(mask, axes=(-2, -1))
    mask_torch = torch.from_numpy(mask).float().cuda()
    # print(mask_torch.size())
    vis = Visualizer()

    model = MRIReconstruction(mask_torch, w, bn).cuda()
    # model = nn.DataParallel(model, device_ids=[0, 2, 3])
    dataset = Dataset(mask_torch)

    dataloader = DataLoader(dataset, batch_size=batch_size)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # criterion = MRIReconstructionLoss(0.01, 0.1, 1)
    criterion = nn.MSELoss()

    valid_dataset = Dataset(mask_torch, training=False)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=batch_size,
                                  shuffle=False)

    if os.path.exists(path):
        model.load_state_dict(torch.load(path, map_location='cuda:%d' % cuda))
        print("Finished load model parameters!")
        print(path)

    print(lr, acc, batch_size, epochs)

    for epoch in range(epochs):
        timestr = time.strftime("%H:%M:%S", time.localtime())
        print(timestr, end=": ")
        loss = 0
        valid_dataset.indexs = []
        _i = 1

        if test_val:
            with torch.no_grad():
                for data in valid_dataloader:
                    u_img, u_k, f_img, f_k = create_input(*data)
                    result = model(*(u_k, u_img))
                    loss += criterion(f_img, result).cpu().item()
                    _i += 1

        ############################## Visualization ###########################
        # the common section

        print("Epoch: ",
              epoch,
              "Rate: ",
              acc,
              "Test Loss: ",
              loss / _i,
              end=" ")
        index = 40

        data = valid_dataset[index]
        a = torch.unsqueeze(data[0], dim=0)
        b = torch.unsqueeze(data[1], dim=0)
        c = torch.unsqueeze(data[2], dim=0)
        d = torch.unsqueeze(data[3], dim=0)
        u_img, u_k, f_img, f_k = create_input(*(a, b, c, d))

        result = model(*(u_k, u_img))
        index = 0

        u_img, u_k, f_img, f_k = create_input(*(a, b, c, d))
        u_img = u_img[index].detach().cpu().numpy()
        u_img = create_complex_value(u_img)
        img = f_img[index].detach().cpu().numpy()
        img = create_complex_value(img)
        vis.plot("%s %s Test Loss - %d" % (model_type, mask_type, acc), loss)
        vis.img("%s %s Undersampled image %d" % (model_type, mask_type, acc),
                abs(u_img))

        h, w = img.shape
        # img_ = img[h//2-128:h//2+128, w//2-128:w//2+128]
        vis.img("%s %s full image %d" % (model_type, mask_type, acc), abs(img))
        vis.img("%s %s Mask %d" % (model_type, mask_type, acc), mask)

        out = result[index].detach().cpu().numpy()
        out = create_complex_value(out)
        vis.img("%s %s Reconstructed image %d" % (model_type, mask_type, acc),
                np.abs(out))

        max_value = np.max(np.abs(img))
        max_value = 255
        print("SSIM: ",
              compare_ssim(np.abs(img), np.abs(out), data_range=max_value),
              "  ",
              compare_ssim(np.abs(img), np.abs(u_img), data_range=max_value),
              end="  ")

        print("SSIM: ",
              compare_psnr(np.abs(img), np.abs(out), data_range=max_value),
              "  ",
              compare_psnr(np.abs(img), np.abs(u_img), data_range=max_value))

        np.save("./data/rec_model3.npy", out)
        np.save("./data/under.npy", u_img)
        np.save("./data/full_img", img)

        for data in dataloader:
            u_img, u_k, f_img, f_k = create_input(*data)
            result = model(*(u_k, u_img))
            loss = criterion(f_img, result)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        torch.save(model.state_dict(), path)
Example #16
def test1():
    lr = args.lr
    acc = args.rate
    batch_size = args.batch_size
    epochs = args.epochs
    mask_type = args.mask
    bn = args.bn
    w = args.w
    model_type = args.model
    data_type = args.data

    test_val = True

    if bn:
        path = "./params/%s_bn_%s_%s_%d.pkl" % (model_type, data_type,
                                                mask_type, acc)
    else:
        # path = "./params/%s_%s_%s_%d_6.pkl" % (model_type, data_type,mask_type, acc)
        path = "./params/%s_%s_%s_%d.pkl" % (model_type, data_type, mask_type,
                                             acc)
    print(path)
    # mask = np.load("./mask/%s/%s/%d.npy" % (data_type, mask_type, acc))
    # mask = np.load("./mask/%s/%s/256_256_%d.npy" % (data_type, mask_type, acc))

    mask = sio.loadmat("./mask/%s/%s/%s_256_256_%d.mat" %
                       (data_type, mask_type, mask_type, acc))['Umask']
    mask = fftshift(mask, axes=(-2, -1))
    mask_torch = torch.from_numpy(mask).float().cuda()

    model = MRIReconstruction(mask_torch, w, bn).cuda()

    from utils.test_data import BrainDataset as Dataset

    if os.path.exists(path):
        model.load_state_dict(torch.load(path))
        print("Finished load model parameters!")

    print(lr, acc, batch_size, epochs)
    if not os.path.exists('./pano/%s/%d/res' % (mask_type, acc)):
        os.makedirs('./pano/%s/%d/res' % (mask_type, acc))

    for i in range(190):
        data = sio.loadmat("./pano/%s/%d/f_img/%d.mat" %
                           (mask_type, acc, i))['im_ori']

        np.save('./output/selected/selected.npy', data)

        dataset = Dataset(mask_torch, ["./output/selected/selected.npy"])
        dataloader = DataLoader(dataset, batch_size=6, shuffle=False)

        for data in dataloader:
            u_img, u_k, f_img, f_k = create_input(*data)
            result = model(*(u_k, u_img)).cpu().detach().numpy()
            c, _, h, w = result.shape

            res = np.zeros((c, h, w), dtype=np.complex128)  # np.complex was removed in NumPy 1.24

            res.real = result[:, 0]
            res.imag = result[:, 1]

            np.save(
                "./pano/%s/%d/res/%d_%s_%s_%d" %
                (mask_type, acc, i, mask_type, model_type, acc), res)
            print("INFO: Saved result . size: ", res.shape)
Example #17
def cross_val(data_original: Dataset, data_config, clf_config, complete_function=None, selected_cols=[]):
    bias = []
    acc = []
    smote = SMOTE()
    scaler = StandardScaler()
    for i in range(10):
        if complete_function:
            data = gen_complete_random(data_original, random_ratio=0.4, selected_cols=selected_cols)
        else:
            data = data_original
        print("Running Cross Validation {}".format(i))
        bias_cv = []
        acc_cv = []
        for train_idx, test_idx in StratifiedShuffleSplit(n_splits=20).split(data.X, data.y):
            X_train, X_test = data.X.iloc[train_idx].copy(), data.X.iloc[test_idx].copy()
            Y_train, Y_test = data.y[train_idx], data.y[test_idx]
            X_train.reset_index(drop=True, inplace=True)
            X_test.reset_index(drop=True, inplace=True)

            if complete_function:
                data_incomplete = Dataset("tmp", X_train, Y_train, types=data.types, 
                    protected_features=data.protected_features, categorical_features=data.categorical_features,
                    encoders=[data.X_encoders, data.y_encoder])
                try:
                    data_complete = complete_function(data_incomplete)
                except Exception as e:
                    print("Error: {}. Skipped".format(e))
                    continue
                if data_complete.X.isnull().sum().sum() > 0:
                    print("Complete function error, skipped")
                    continue
                X_train = data_complete.X.copy()
                Y_train = data_complete.y.copy()
            X_train.drop(columns=data.protected_features, inplace=True)

            if complete_function:
                data_incomplete = Dataset("tmp", X_test, Y_test, types=data.types, 
                    protected_features=data.protected_features, categorical_features=data.categorical_features,
                    encoders=[data.X_encoders, data.y_encoder])
                try:
                    data_complete = complete_function(data_incomplete)
                except Exception as e:
                    print("Error: {}. Skipped".format(e))
                    continue
                if data_complete.X.isnull().sum().sum() > 0:
                    print("Complete function error, skipped")
                    continue
                X_test = data_complete.X.copy()
                Y_test = data_complete.y.copy()
            
            X_train_res, Y_train_res = smote.fit_resample(X_train, Y_train)
            X_scaled = scaler.fit_transform(X_train_res)
            clf = LogisticRegression(max_iter=clf_config["max_iter"], C=clf_config["C"], tol=clf_config["tol"])
            clf.fit(X_scaled, Y_train_res)
            X_test_scaled = pd.DataFrame(scaler.transform(X_test.drop(columns=data.protected_features)), columns=X_test.drop(columns=data.protected_features).columns)
            X_test_scaled = pd.concat([X_test_scaled, X_test[data.protected_features]], axis=1)
            X_test_A = X_test_scaled[X_test_scaled[data_config["target"]] == data_config["A"]].drop(columns=data.protected_features).to_numpy()
            X_test_B = X_test_scaled[X_test_scaled[data_config["target"]] == data_config["B"]].drop(columns=data.protected_features).to_numpy()
            Y_test_A = Y_test[X_test_scaled[X_test_scaled[data_config["target"]] == data_config["A"]].index.tolist()]
            Y_test_B = Y_test[X_test_scaled[X_test_scaled[data_config["target"]] == data_config["B"]].index.tolist()]
            matrix_A = confusion_matrix(Y_test_A, clf.predict(X_test_A)).ravel().tolist()
            matrix_B = confusion_matrix(Y_test_B, clf.predict(X_test_B)).ravel().tolist()
            try:
                bias_cv.append(newBias(matrix_A+matrix_B))
            except Exception as e:
                print("\tError: {}, skipped".format(e))
            acc_cv.append(accuracy_score(clf.predict(X_test_scaled.drop(columns=data.protected_features).to_numpy()), Y_test))
        bias.append(np.mean(bias_cv))
        acc.append(np.mean(acc_cv))
    return (np.mean(bias), np.mean(acc))
Example #18
if __name__ == '__main__':

    args = yaml.load(open('./configs/runner.yml', 'r'), Loader=yaml.FullLoader)
    args = {**args, **yaml.load(open('./configs/data.yml', 'r'), Loader=yaml.FullLoader)}

    seed = args["seed"]
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    train_file, test_file, embedding_source = args["train_file"], args["test_file"], args["embedding_source"]

    dataset = Dataset(data_columns=args["data_info"], batch_size=args["batch_size"], seq_len=args["seq_len"])
    dataset.load_data(train_file, test_file, embedding_source)

    vocab_size = len(dataset.vocab)
    embeddings = dataset.embeddings

    model = instantiate_model(args["model"], vocab_size, embeddings)

    # tqdm_range = trange(args["epochs"], desc="Epoch", unit="epoch")
    print("=" * 54)
    for epoch in range(args["epochs"]):
        _, _ = model.run_epoch(dataset.train_batch_loader, dataset.valid_batch_loader)
        print("-" * 54)
        train_acc = model.evaluate(dataset.train_batch_loader)
        val_acc = model.evaluate(dataset.valid_batch_loader)
        print("Epoch %3d ended | train acc.: %3.2f | val acc.: %.2f" % ((epoch + 1), train_acc * 100, val_acc * 100))
Example #19
history = {
    'train_losses': [],
    'val_losses': [],
    'epoch_data': []
}

if args.checkpoint_path:
    checkpoint = load_checkpoint(args.checkpoint_path)
    model.load_state_dict(checkpoint['model_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    history = checkpoint['history']
    START_EPOCH = history['epoch_data'][-1]+1

    
ds = Dataset(path=args.dataset_path)
shuffle_dataset = True
# Creating data indices for training and validation splits:
dataset_size = len(ds)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]


# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
validation_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(ds, batch_size=batch_size, sampler=train_sampler)
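
validation_sampler is built above but the snippet stops before its loader; presumably (an assumption, mirroring the train loader) the missing line is:

validation_loader = torch.utils.data.DataLoader(ds, batch_size=batch_size, sampler=validation_sampler)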
Example #20
def main():
    config_name = sys.argv[1]

    # load configuration and data
    with open(config_name) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    PROJECT_PATH = config['GPU_project_path']
    config_path = f"{PROJECT_PATH}/{config_name}"
    Data = Dataset(PROJECT_PATH, config_path)
    Data.load_data()

    # configuration
    INFO = Data.info
    INFO_values = Data.info_values
    skeleton = config['skeleton']
    skeleton_color = config['skeleton_color']
    skeleton_fill = config['skeleton_fill']
    num_videos_per_clusters = config['num_sample_videos']
    video_duration = config['max_video_length']
    video_type = config['video_type']

    # bodypoints
    if video_type == 0:
        tot_bp = Data.data_obj['bodypoints']
    elif video_type == 1:
        tot_bp = Data.data_obj['rotated_bodypoints']
    elif video_type == 2:
        tot_bp = Data.data_obj['scaled_bodypoints']
    # embeddings
    tot_embed = Data.data_obj['all_embeddings']
    # cluster
    tot_clusters = Data.data_obj['cluster']
    num_clusters = int(np.max(tot_clusters)) + 1

    # Determine which frames to extract
    video_cluster_idx = {}
    for clust_i in range(num_clusters):
        # Group consecutive frames sharing cluster label clust_i; longest runs first.
        sorted_list_idx = sorted((list(y) for (x, y) in itertools.groupby(
            enumerate(tot_clusters), operator.itemgetter(1)) if x == clust_i),
                                 key=len,
                                 reverse=True)
        # Keep the (start, stop) frame indices of the longest runs.
        top_start_stop_idx = map(lambda x: [x[0][0], x[-1][0]],
                                 sorted_list_idx[0:num_videos_per_clusters])
        video_cluster_idx[clust_i] = np.array(list(top_start_stop_idx))

    global_start_frames = np.array(
        [val['global_start_fr'] for val in INFO_values])
    global_stop_frames = np.array(
        [val['global_stop_fr'] for val in INFO_values])
    global_directories = np.array([val['directory'] for val in INFO_values])

    for clust_i in range(0, num_clusters):
        # Create video
        fig, ax = plt.subplots(3, 3, figsize=(10, 10))
        fig.suptitle(f"Cluster {clust_i} Sample Videos")
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])

        # animal video data
        if video_type == 0:
            video_i, file_start_fr = {}, {}
            for i, (start, stop) in enumerate(
                    tqdm(video_cluster_idx[clust_i],
                         desc="Collecting Videos")):
                file_bool = start > global_start_frames
                if any(file_bool):
                    file_start_fr[i] = global_start_frames[file_bool][-1]
                    file_path = global_directories[file_bool][-1]
                    file_key = file_path.split("/")[-1]
                    video_path = glob(
                        f"{config['raw_video_path']}/{file_key}.avi")[0]
                    video = skvideo.io.vread(video_path)

                    video_start = start - file_start_fr[i]
                    video_stop = video_start + video_duration
                    if video_stop < len(video):
                        video_i[i] = video[video_start:video_stop]
                    else:
                        video_i[i] = video[video_start:]
                else:
                    return  # don't create a video

        # video format
        FFMpegWriter = animation.writers['ffmpeg']
        writer = FFMpegWriter(fps=25)
        SAVE_PATH = f"{config['save_video_path']}/mutivideo_cluster{clust_i}.mp4"
        if not os.path.exists(f"{config['save_video_path']}"):
            os.makedirs(f"{config['save_video_path']}")
        with writer.saving(fig, SAVE_PATH, dpi=300):
            for fr_i in tqdm(np.arange(0, video_duration),
                             desc=f"Cluster {clust_i} Frame Loop"):
                for i, (start, stop) in enumerate(video_cluster_idx[clust_i]):
                    fr = start + fr_i

                    # skip this panel once its video has ended
                    if (video_type == 0) & (fr_i >= len(video_i[i])):
                        continue
                    # configure plot
                    ax[i // 3, i % 3].clear()
                    ax[i // 3, i % 3].set_axis_off()
                    ax[i // 3,
                       i % 3].set(title=f"Cluster {int(tot_clusters[fr])}")

                    if (video_type == 1) | (video_type == 2):
                        ax[i // 3, i % 3].set(xlim=(-3, 3), ylim=(-3, 3))
                    if video_type == 0:
                        ax[i // 3, i % 3].imshow(video_i[i][fr_i])
                    for skeleton_i, color_i in zip(skeleton, skeleton_color):
                        ax[i // 3, i % 3].plot(tot_bp[fr, skeleton_i, 0],
                                               tot_bp[fr, skeleton_i, 1],
                                               marker="o",
                                               markersize=2,
                                               linewidth=2,
                                               alpha=0.6,
                                               c=color_i)
                    for fill_obj in skeleton_fill:
                        ax[i // 3, i % 3].add_patch(
                            matplotlib.patches.Polygon(
                                xy=tot_bp[fr, fill_obj['trapezoid'], 0:2],
                                fill=True,
                                alpha=0.7,
                                color=fill_obj['fill']))
                writer.grab_frame()
            plt.close()
Example #21
def convert_protected(data: Dataset) -> Tuple[Dataset, LabelEncoder]:
    data = data.copy()
    encoder = LabelEncoder()
    for feature in data.protected_features:
        data.X[feature] = encoder.fit_transform(data.X[feature])
    return data, encoder
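
Because the single LabelEncoder is re-fitted on every pass through the loop, the returned encoder only retains the mapping of the last protected feature. A variant that keeps one encoder per column (a sketch, not the original API) could look like:

from typing import Dict, Tuple
from sklearn.preprocessing import LabelEncoder

def convert_protected_per_column(data: Dataset) -> Tuple[Dataset, Dict[str, LabelEncoder]]:
    # Keep a fitted encoder per protected feature so each column's mapping
    # can later be inverted with encoders[feature].inverse_transform(...).
    data = data.copy()
    encoders = {}
    for feature in data.protected_features:
        encoder = LabelEncoder()
        data.X[feature] = encoder.fit_transform(data.X[feature])
        encoders[feature] = encoder
    return data, encoders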
Example #22
def main():
    PROJECT_PATH = "/rapids/notebooks/host/BM_GPU"
    config_path = f"{PROJECT_PATH}/config_aicrowd.yaml"
    num_videos_per_clusters = 9
    video_duration = 200 # frames

    Data = Dataset(PROJECT_PATH, 'task1_etho', config_path)
    Data.load_data()

    # configuration
    INFO = Data.info
    INFO_values = Data.info_values

    with open(f"{PROJECT_PATH}/config_aicrowd.yaml") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # config = Data.config
    skeleton = config['skeleton']
    skeleton_color= config['skeleton_color']
    skeleton_fill = config['skeleton_fill']
    
    # features
    #tot_bp = Data.data_obj['rotated_bodypoints']
    tot_bp = Data.data_obj['scaled_bodypoints']
    # embeddings
    tot_embed = Data.data_obj['all_embeddings']
    # cluster
    tot_clusters = Data.data_obj['cluster']
    num_clusters = int(np.max(tot_clusters))+1

    # Determine which frames to extract
    video_cluster_idx = {}
    for clust_i in range(num_clusters):
        clust_idx = np.where(tot_clusters==clust_i)[0]
        difference = np.diff(clust_idx)

        # Find consecutive break
        break_idx = np.where(difference != 1)[0]
        mod_break_idx = np.insert(break_idx, 0, 0)
        break_difference = np.diff(mod_break_idx)

        # Find max consecutive
        sorted_idx = np.argsort(break_difference)
        top_idx = sorted_idx[-num_videos_per_clusters:]
        video_idx = np.array([[ clust_idx[mod_break_idx[idx]+1], clust_idx[mod_break_idx[idx+1]+1]] for idx in top_idx])
        video_cluster_idx[clust_i] = video_idx
    
    global_start_frames = np.array([val['global_start_fr'] for val in INFO_values])
    global_stop_frames = np.array([val['global_stop_fr'] for val in INFO_values])
    global_directories = np.array([val['directory'] for val in INFO_values])
    
    for clust_i in range(0, num_clusters):
        # Create video
        fig, ax = plt.subplots(3,3,figsize=(10,10))
        fig.suptitle(f"Cluster {clust_i} Sample Videos")
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])

        # # animal video data
        # video_i, file_start_fr = {}, {}
        # for i, (start, stop) in enumerate(tqdm(video_cluster_idx[clust_i], desc="Collecting Videos")):
        #     file_bool = start > global_start_frames
        #     if any(file_bool):
        #         file_start_fr[i] = global_start_frames[file_bool][-1]
        #         file_path = global_directories[file_bool][-1]
        #         file_key = file_path.split("/")[-1]
        #         video_path = glob(f"{VIDEO_PATH}/{file_key}.avi")[0]
        #         video = skvideo.io.vread(video_path)

        #         video_start = start-file_start_fr[i]
        #         video_stop = video_start+video_duration
        #         if video_stop < len(video):
        #             video_i[i] = video[video_start:video_stop]
        #         else:
        #             video_i[i] = video[video_start:]
        #     else:
        #         return # don't create a video

        # video format        
        FFMpegWriter = animation.writers['ffmpeg']
        writer = FFMpegWriter(fps=25)
        SAVE_PATH=f"videos/task1_etho/mutivideo_cluster{clust_i}.mp4"
        with writer.saving(fig, SAVE_PATH, dpi=300):
            for fr_i in tqdm(np.arange(0, video_duration), desc=f"Cluster {clust_i} Frame Loop"):
                for i, (start, stop) in enumerate(video_cluster_idx[clust_i]):
                    fr = start+fr_i

                    # configure plot
                    ax[i//3,i%3].clear()
                    ax[i//3,i%3].set_axis_off()
                    ax[i//3,i%3].set(title=f"Cluster {int(tot_clusters[fr])}")
                    ax[i//3,i%3].set(xlim=(-3,3), ylim=(-3,3))

                    # ax[i//3,i%3].imshow(video_i[i][fr_i])

                    for skeleton_i, color_i in zip(skeleton, skeleton_color):
                        ax[i//3,i%3].plot(tot_bp[fr,skeleton_i,0], tot_bp[fr,skeleton_i,1], marker="o", markersize=2,
                            linewidth=2, alpha=0.6, c=color_i)
                    for fill_obj in skeleton_fill:   
                        ax[i//3,i%3].add_patch(matplotlib.patches.Polygon(xy=tot_bp[fr,fill_obj['trapezoid'],0:2], fill=True, 
                            alpha=0.7, color=fill_obj['fill']))

                writer.grab_frame()
            plt.close()
Example #23
def main():
    # grab arguments
    config_name = sys.argv[1]

    with open(config_name) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    config_path = f"{config['GPU_project_path']}/{config_name}"
    PROJECT_PATH = config['GPU_project_path']
    Data = Dataset(PROJECT_PATH, config_path)
    Data.load_data()

    # configuration
    INFO = Data.info
    INFO_items = list(INFO.items())
    INFO_items.sort(key=lambda x: x[1]['order'])
    config = Data.config

    with open(
            f"{PROJECT_PATH}/{config['result_path']}/angle_scale_model.pickle",
            'rb') as file:
        angle_scaler = pickle.load(file)
    with open(
            f"{PROJECT_PATH}/{config['result_path']}/limb_scale_model.pickle",
            'rb') as file:
        limb_scaler = pickle.load(file)

    # features
    tot_bp = Data.data_obj['rotated_bodypoints'] * config['postural_weight']
    tot_angle = angle_scaler.transform(
        Data.data_obj['angles']) * config['postural_weight']
    tot_limb = limb_scaler.transform(
        Data.data_obj['limbs']) * config['postural_weight']
    tot_marker_pwr = None  # Implement this later maybe
    tot_angle_pwr = Data.data_obj['angle_power'] * config['kinematic_weight']
    tot_limb_pwr = Data.data_obj['limb_power'] * config['kinematic_weight']

    # take out bad frames
    tot_good_fr, tot_bad_fr, tot_disregard_fr = locate_bad_fr(config, tot_bp)

    tot_angle = tot_angle[tot_good_fr]
    tot_limb = tot_limb[tot_good_fr]
    tot_angle_pwr = tot_angle_pwr[tot_good_fr]
    tot_limb_pwr = tot_limb_pwr[tot_good_fr]

    print("******************")
    nan_fr = np.where(np.isnan(tot_limb_pwr))[0]
    print(len(np.unique(nan_fr)))
    print("******************")

    # Postural Embedding
    if config['include_marker_postural']:
        start_timer = time.time()
        print(f"::: Marker Postural ::: START")
        p_marker_embed(config, INFO_items, tot_bp)
        print(
            f"::: Marker Postural ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_angle_postural']:
        start_timer = time.time()
        print(f"::: Angle Postural ::: START")
        p_angle_embed(config, INFO_items, tot_angle)
        print(
            f"::: Angle Postural  ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_limb_postural']:
        start_timer = time.time()
        print(f"::: Limb Postural  ::: START")
        p_limb_embed(config, INFO_items, tot_limb)
        print(
            f"::: Limb Postural  ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_all_postural']:
        start_timer = time.time()
        print(f"::: Angle & Limb Postural ::: START Timer")
        p_angle_limb_embed(config, INFO_items, tot_angle, tot_limb)
        print(
            f"::: Angle & Limb Postural ::: Time Stamp: {time.time()-start_timer}"
        )

    # Kinematic Embedding
    if config['include_marker_kinematic']:
        start_timer = time.time()
        print(f"::: Marker Kinematic ::: START")
        k_marker_embed(config, INFO_items, tot_marker_pwr)
        print(
            f"::: Marker Kinematic ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_angle_kinematic']:
        start_timer = time.time()
        print(f"::: Angle Kinematic ::: START")
        k_angle_embed(config, INFO_items, tot_angle_pwr)
        print(
            f"::: Angle Kinematic ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_limb_kinematic']:
        start_timer = time.time()
        print(f"::: Limb Kinematic ::: START")
        k_limb_embed(config, INFO_items, tot_limb_pwr)
        print(
            f"::: Limb Kinematic ::: Computation Time: {time.time()-start_timer}"
        )
    if config['include_all_kinematic']:
        start_timer = time.time()
        print(f"::: Angle & Limb Kinematic ::: START Timer")
        k_angle_limb_embed(config, INFO_items, tot_angle_pwr, tot_limb_pwr)
        print(
            f"::: Angle & Limb Kinematic ::: Time Stamp: {time.time()-start_timer}"
        )

    if config['include_all_features']:
        start_timer = time.time()
        print(f"::: Angle & Limb Kinematic & Postural ::: START Timer")
        kp_angle_limb_embed(config, INFO_items, tot_angle, tot_limb,
                            tot_angle_pwr, tot_limb_pwr)
        print(
            f"::: Angle & Limb Kinematic & Postural ::: Time Stamp: {time.time()-start_timer}"
        )
Example #24
def load_dataset():
    global data_path
    dataset = Dataset(data_path, verbose=True)
    dataset_size = len(dataset.samples)
    assert dataset_size > 0
    return dataset
Example #25
from utils.data import Dataset

base_path = os.getcwd()
model_path = os.path.join(base_path, "model_save")
train_datapath = os.path.join(base_path, "data", "train.tfrecords")
val_datapath = os.path.join(base_path, "data", "test.tfrecords")


class My_config(config):
    Name = "ImageNet"
    Num_categrade = 13
    Batch_size = 10
    Use_learning_rate_reduce = False


real_config = My_config()

train_dataset = Dataset(train_datapath, config=real_config)
val_dataset = Dataset(val_datapath, config=real_config)
# a = next(train_dataset.data_generater())

model = Densenet(model_dir=model_path, config=real_config, mode='training')

model.train(train_dataset=train_dataset,
            val_dataset=val_dataset,
            learning_rate=real_config.Learning_rate,
            epochs=200,
            layers='heads')

print(model)