Example #1
    # Event handler for a pytorch-ignite trainer; `needs_save`, `k`, `n_splits`,
    # `evaluator`, `trainer`, `config` and `output_dir_path` are captured from
    # the enclosing scope.
    def switch_evaluation_to_training(engine):
        if needs_save:
            save_logs('val', k, n_splits, evaluator, trainer.state.epoch, trainer.state.iteration,
                      config, output_dir_path)

            if trainer.state.epoch % 100 == 0:
                save_models(model, optimizer, k, n_splits, trainer.state.epoch, trainer.state.iteration,
                            config, output_dir_path)
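The handler above is a fragment of a larger training script. A minimal sketch of how such a handler is typically attached, assuming `trainer` is a pytorch-ignite Engine:

from ignite.engine import Events

trainer.add_event_handler(Events.EPOCH_COMPLETED, switch_evaluation_to_training)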
Example #2
def main():
    """
    Parsing command line parameters, creating the target matrix, fitting an SGCN, predicting edge signs, and saving the embedding.
    """
    args = parameter_parser()
    tab_printer(args)
    edges = read_graph(args)
    trainer = SignedGCNTrainer(args, edges)
    trainer.setup_dataset()
    trainer.create_and_train_model()
    if args.test_size > 0:
        trainer.save_model()
        score_printer(trainer.logs)
        save_logs(args, trainer.logs)
Example #3
def main():
    """
    Parsing command line parameters, reading data.
    Doing sparsification, fitting a GWNN and saving the logs.
    """
    args = parameter_parser()
    tab_printer(args)
    graph = graph_reader(args.edge_path)
    features = feature_reader(args.features_path)
    target = target_reader(args.target_path)
    sparsifier = WaveletSparsifier(graph, args.scale, args.approximation_order, args.tolerance)
    sparsifier.calculate_all_wavelets()
    trainer = GWNNTrainer(args, sparsifier, features, target)
    trainer.fit()
    trainer.score()
    save_logs(args, trainer.logs)
Example #4
def main():
    """
    Parsing command line parameters.
    Creating target matrix.
    Fitting an SGCN.
    Predicting edge signs and saving the embedding.
    """
    args = parameter_parser()
    avg_auc = []
    avg_f1 = []
    avg_precision = []
    avg_recall = []
    avg_acc = []

    for x in range(int(args.num_runs)):
        print("Iteration: ", x)
        tab_printer(args)
        edges = read_graph(args)
        trainer = SignedGCNTrainer(args, edges)
        trainer.setup_dataset()
        trainer.create_and_train_model()
        if args.test_size > 0:
            trainer.save_model()
            score_printer(trainer.logs)
            save_logs(args, trainer.logs)
            # Compute every metric once instead of re-running score_printer
            # five more times.
            auc, f1, precision, recall, acc = score_printer(trainer.logs, avg='auc')
            avg_auc.append(auc)
            print("This run's AUC: ", "%.3f" % auc)
            print('-----')
            avg_f1.append(f1)
            avg_precision.append(precision)
            avg_recall.append(recall)
            avg_acc.append(acc)

    print('AUC averaged over {} runs: '.format(args.num_runs),
          "%.3f" % np.mean(avg_auc))
    print('F1 averaged over {} runs: '.format(args.num_runs),
          "%.3f" % np.mean(avg_f1))
    print('Precision averaged over {} runs: '.format(args.num_runs),
          "%.3f" % np.mean(avg_precision))
    print('Recall averaged over {} runs: '.format(args.num_runs),
          "%.3f" % np.mean(avg_recall))
    print('Accuracy averaged over {} runs: '.format(args.num_runs),
          "%.3f" % np.mean(avg_acc))
    print('Max AUC: ', "%.3f" % max(avg_auc), 'Max F1: ', "%.3f" % max(avg_f1),
          'Max Precision: ', "%.3f" % max(avg_precision),
          'Max Recall: ', "%.3f" % max(avg_recall),
          'Max Accuracy: ', "%.3f" % max(avg_acc))
Example #5
def gaussian_process_hyper_opt(data, labels, model, param_space, model_name):
    '''
    Bayesian optimization of classifier hyper-parameters using Gaussian
    Processes. Saves the best parameters to the "logs" directory.
    [data]        : numpy array
    [labels]      : list or numpy array
    [model]       : scikit-learn model object
    [param_space] : list of named skopt dimensions
    [model_name]  : str
    '''
    @use_named_args(param_space)
    def objective(**params):
        '''
        objective function to minimize
        '''
        model.set_params(**params)
        scores = cross_val_score(estimator=model,
                                 X=data,
                                 y=labels,
                                 cv=cf.n_cv_folds,
                                 n_jobs=cf.n_jobs,
                                 verbose=0)
        return -1 * np.mean(scores)

    # start Bayesian optimization
    gaussian_process = gp_minimize(objective,
                                   dimensions=param_space,
                                   n_calls=cf.n_iters,
                                   verbose=True)
    print('\tHyper-parameter search completed...')

    # extract the best hyper-parameters; the order of param_space matches
    # the components of gaussian_process.x
    best_params = {
        parameter.name: gaussian_process.x[i]
        for i, parameter in enumerate(param_space)
    }
    print('Best hyperparameters:', best_params)

    # save the best params to the logs directory
    save_logs(best_params, model_name)

    return best_params
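A minimal usage sketch, assuming `data` and `labels` are already loaded; the classifier and dimension names below are illustrative, not part of the original:

from sklearn.ensemble import RandomForestClassifier
from skopt.space import Integer, Real

param_space = [
    Integer(50, 500, name='n_estimators'),   # names are passed to set_params
    Integer(2, 20, name='max_depth'),
    Real(1e-4, 1e-1, prior='log-uniform', name='min_impurity_decrease'),
]
best_params = gaussian_process_hyper_opt(data, labels,
                                         RandomForestClassifier(),
                                         param_space, 'random_forest')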
Example #6
def main():
    """
    Parsing command line parameters.
    Creating target matrix.
    Fitting an SGCN.
    Predicting edge signs and saving the embedding.
    """
    args = parameter_parser()
    tab_printer(args)

    # override the parsed defaults for the Bitcoin OTC dataset
    args.edge_path = '../input/bitcoin_otc.csv'
    args.embedding_path = '../output/embedding/bitcoin_otc_sgcn.csv'
    args.features_path = './input/bitcoin_otc.csv'
    args.regression_weights_path = '../output/weights/bitcoin_otc_sgcn.csv'
    args.epochs = 1

    edges = read_graph(args)  # load the training data
    trainer = SignedGCNTrainer(args, edges)
    trainer.setup_dataset()  # compute the features
    trainer.create_and_train_model()
    if args.test_size > 0:
        trainer.save_model()
        score_printer(trainer.logs)
        save_logs(args, trainer.logs)
Example #7
def generate_positives(args: dict) -> None:
    pid = os.getpid()
    print(f"[INFO]: Process {pid} started")
    # Initialize augmentators
    deformator = Deformator(thresh=float(args["params"]["deform_thresh"]),
                            deformation_limit=float(
                                args["params"]["deform_limit"]))
    rotator = Rotation(rotation_limit=int(args["params"]["rotation_limit"]),
                       rotation_thresh=float(
                           args["params"]["rotation_thresh"]),
                       rotation_type="bound")
    resizer = Resize(
        resize_range=[float(e) for e in args["params"]["resize_limit"]])
    cutter = CutOut(thresh=float(args["params"]["cutout_thresh"]),
                    n=int(args["params"]["cutout_nb"]),
                    size=float(args["params"]["cutout_size"]),
                    squared=False)
    perspective_wrapper = PerspectiveWrapper(
        thresh=float(args["params"]["perspective_thresh"]),
        scale_limit=[float(e) for e in args["params"]["perspective_range"]])
    jpeg_compressor = JPEGCompressor(thresh=0.01)
    color = Color(thresh=float(args["params"]["color_thresh"]))
    noise_blurer = NoiseBlur(types=["multiply", "contrast", "blur"],
                             thresh=float(args["params"]["noise_blur_thresh"]))
    # NOTE: a) order matters; b) if tuple provided, only one augmentation in
    # the tuple will be randomly selected and applied.
    logo_aug_before = [
        deformator, (color, noise_blurer), (perspective_wrapper, cutter),
        rotator, resizer
    ]
    logo_aug_after = [jpeg_compressor]
    augmenter = Augmenter(
        logo_aug_before=logo_aug_before,
        logo_aug_after=logo_aug_after,
        transp_thresh=float(args["params"]["transp_thresh"]),
        transp_range=[float(e) for e in args["params"]["transp_range"]])
    if args["split"]:
        save_train_path = os.path.join(args["save_path"], "train")
        save_valid_path = os.path.join(args["save_path"], "valid")
        valid_required = int(args["nb_imgs_required"] * args["split"])
        train_required = args["nb_imgs_required"] - valid_required
        assert valid_required > 0 and train_required > 0

    background_gen = utils.get_background_image(args["background_dir"])
    logs = dict()
    logo_paths = [
        os.path.join(args["logo_dir"], e) for e in os.listdir(args["logo_dir"])
        if not e.endswith(".txt")
    ]
    total, exceptions = 0, 0
    img_count = args["img_count"]
    while True:
        if total == args["nb_imgs_required"]:
            break
        # Read random logo and background image
        logo_path = random.choice(logo_paths)
        backgr_path = next(background_gen)
        logo_image = cv2.imread(logo_path, cv2.IMREAD_UNCHANGED)
        backgr_image = cv2.imread(backgr_path)
        if logo_image is None:
            print(f"[ERROR]: Process: {pid} failed to open "
                  f"logo: {logo_path}")
            exceptions += 1
            continue
        if backgr_image is None:
            print(f"[ERROR]: Process: {pid} failed to open "
                  f"background: {backgr_path}")
            exceptions += 1
            continue

        # Generate synthetic image
        try:
            image, coord, augments = augmenter.generate_image(
                logo=logo_image, background=backgr_image)
        except Exception:
            # augmentation may fail on degenerate logo/background pairs
            exceptions += 1
            continue
        if image is None:
            print(f"[ERROR]: Img generated in process: {pid} is None")
            exceptions += 1
            continue

        # Save generated image
        if args["split"]:
            if total < train_required:
                store_path = os.path.join(save_train_path, f"{img_count}.jpg")
            else:
                store_path = os.path.join(save_valid_path, f"{img_count}.jpg")
        else:
            store_path = os.path.join(args["save_path"], f"{img_count}.jpg")
        try:
            cv2.imwrite(store_path, image)
        except Exception as e:
            print(f"[ERROR]: Process: {pid} failed to write generated "
                  f"image on disk. Error: {e}")
            exceptions += 1
            continue

        # Save txt file containing object coordinates
        if args["split"]:
            if total < train_required:
                txt_store_path = save_train_path
            else:
                txt_store_path = save_valid_path
        else:
            txt_store_path = args["save_path"]
        is_saved = utils.dump_coord_txt(cls=args["class_index"],
                                        payload=coord,
                                        name=img_count,
                                        save_path=txt_store_path)
        if not is_saved:
            print(f"[ERROR]: Process: {pid} failed to save "
                  f"coordinates into txt")
            exceptions += 1
            continue

        # Keep track of augmentation applied alongside nb of images generated
        logs[f"{img_count}.jpg"] = augments
        total += 1
        img_count += 1
        if total % 100 == 0:
            print(f"[INFO]: Process {pid} generated {total} images")

    # Save augmentation logs
    if logs:
        if args["split"]:
            logs_save_path = args["save_path"]
        else:
            logs_save_path = os.path.split(args["save_path"])[0]
        utils.save_logs(logs, logs_save_path, args["class_name"])
    print(f"[INFO]: Process: {pid} finishing with {exceptions} exceptions")
Example #8
    def run_validation(engine):
        evaluator.run(val_data_loader, max_epochs=1)

        if needs_save:
            save_logs('val', evaluator, config, output_dir_path)
            save_images(evaluator, trainer.state.epoch)
Example #9
    def call_save_logs(engine):
        if needs_save:
            return save_logs('train', engine, config, output_dir_path)
Example #10
        print('dalong log : for model {}, init with {}'.format(model_index, init_model))
        model_dict = torch.load(init_model)
        submodels[model_index].load_state_dict(model_dict)
    ##############
    # init Router
    ##############
    init_model = os.path.join('./models/Encoder', args.init_router)
    model_dict = torch.load(init_model)
    router.load_state_dict(model_dict)

    Test(test_loader, submodels, router)

if __name__ == '__main__':

    parser = cfg.parser
    args = parser.parse_args()
    tmp = args.init_submodel.split('\\')
    args.init_folder = args.init_folder.split()
    args.init_depth = [int(item) for item in args.init_depth.split()]
    args.init_submodel = [tmp[index].split(' ')[-1] for index in range(1, len(tmp))]
    print('all the params set = {}'.format(args))
    if not os.path.exists(args.checkpoint_folder):
        os.makedirs(args.checkpoint_folder)
        print('dalong log : all the models will be saved under {}\n'.format(args.checkpoint_folder))
    utils.save_logs(args.checkpoint_folder, args)
    main(args)
Example #11
import utils, os
import numpy as np
from data import preprocess, visualization
from config import args

from keras.optimizers import Adam
from models.simple_cnn import SimpleCNN
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.losses import categorical_crossentropy

# save configs
utils.save_logs()

# data preprocessing
if args.is_data_video == 'True':
    # once the send data arrives, the preprocess part will be rewritten for LSTM model training
    pass
elif args.is_data_video == 'False':
    xtrain, xvalid, ytrain, yvalid = preprocess.load_image()
else:
    print('Check the is_data_video argument. It must be True or False')

# hyper-parameters
num_features = 64
num_labels = 7
size = (48, 48)
batch_size = 64
epochs = 35

# Select Model
if args.model == 'sCNN':
Example #12
    def switch_training_to_evaluation(engine):
        if needs_save:
            save_logs('train', k, n_splits, trainer, trainer.state.epoch, trainer.state.iteration,
                      config, output_dir_path)

        evaluator.run(test_data_loader, max_epochs=1)