# Example #1
    def compiler(self, data):
        """Build the network for this run's hyper-parameters and compile it.

        The architecture is derived from the shape of the training input;
        optimizer, loss and metric list come from the parsed arguments and
        from attributes set elsewhere on this object.
        """
        args = self.args
        # Construct the model from the training-input shape.
        self.model = get_model(
            data.train_in.shape,
            hid_act=args.ACT,
            hid_filters=args.HF,
            kernels=args.K,
            pbc=args.PBC,
        )
        # Wire optimizer, loss and metrics into the compiled model.
        self.model.compile(
            optimizer=args.OPT,
            loss=self.loss,
            metrics=self.metrics_list,
        )
def extract(net, weight, imgs_path, mean_path, classes):
    """Extract intermediate-layer features for every image listed in imgs_path.

    Writes one line per image to
    '<weight dir>/<net>_<list name>_extracted_features_from_<weight name>.txt':
    the image path, its label, then the feature values rounded to 4 decimals.

    Parameters:
        net: network name ('vgg16', 'inception_v3'; 'smallnet'/'c3d' exit).
        weight: path to the weights file to load.
        imgs_path: text file with one '<image_path> <label>' pair per line.
        mean_path: path to the mean image subtracted from each input.
        classes: number of output classes (converted to int).

    Raises:
        ValueError: if `net` is not a known network name.
    """
    if net == 'vgg16' or net == 'inception_v3':
        # Features are taken from the second dense layer.
        layer_name = 'dense_2'

    elif net == 'smallnet':
        sys.exit(0)  # extraction layer not configured for this net

    elif net == 'c3d':
        sys.exit(0)  # extraction layer not configured for this net

    else:
        raise ValueError('Network "%s" not found!' % (net))

    m, params = architectures.get_model(net, nb_classes=int(classes))
    m.load_weights(weight)

    # Truncated model whose output is the chosen intermediate layer.
    intermediate_layer_model = Model(inputs=m.input,
                                     outputs=m.get_layer(layer_name).output)

    weight_name = basename(weight)
    data_name = basename(imgs_path)
    dir_out = dirname(weight) + '/'
    name_out = dir_out + net + '_' + data_name + '_' + 'extracted_features_from_' + weight_name + '.txt'

    pf = pathfile.FileOfPaths(imgs_path)
    pb = progressbar.ProgressBar(pf.nb_lines)

    np.set_printoptions(precision=4)
    mean = cv2.imread(mean_path)

    # 'with' guarantees both files are closed even if prediction fails.
    with open(name_out, 'w') as file_out, open(imgs_path) as f:
        _start(0)

        for line in f:
            pb.update()
            img_path, y = line.split()
            img = image.load_img(img_path)
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            # Mean subtraction; assumes the mean image matches the
            # loaded image size -- TODO confirm against training pipeline.
            x -= mean
            intermediate_output = intermediate_layer_model.predict(x)
            # BUG FIX: label each row with the per-image path (img_path),
            # not the path of the list file itself (imgs_path), which is
            # constant for the whole run.
            file_out.write(img_path + ' ' + y)

            for i in np.nditer(intermediate_output):
                file_out.write(' ' + str(np.round(i, 4)))

            file_out.write('\n')
# Example #3
    def train(self, data):
        """Train one fresh network per temperature in args.T_list.

        For every temperature the corresponding slice of the train/test
        arrays is selected, a new model is built and fitted, and the
        resulting metrics and weights are saved to disk.
        """
        total = len(self.args.T_list)
        for idx, temp in enumerate(self.args.T_list):
            # A brand-new model is built for every temperature.
            self.model = get_model(data.train_in.shape,
                                   feat_ext=self.args.FEAT,
                                   hid_act=self.args.ACT,
                                   hid_filters=self.args.HF,
                                   kernels=self.args.K,
                                   pbc=self.args.PBC)

            self.model.compile(optimizer=self.args.OPT,
                               loss=self.loss,
                               metrics=self.metrics_list)

            # Per-temperature slice bounds, then an optional size cap
            # (TRS for training, VALS for validation).
            tr_lo = idx * self.args.nTR
            tr_hi = tr_lo + self.args.nTR
            te_lo = idx * self.args.nTE
            te_hi = te_lo + self.args.nTE
            x_train = data.train_in[tr_lo:tr_hi][:self.args.TRS]
            y_train = data.train_out[tr_lo:tr_hi][:self.args.TRS]
            x_val = data.test_in[te_lo:te_hi][:self.args.VALS]
            y_val = data.test_out[te_lo:te_hi][:self.args.VALS]

            hist = self.model.fit(x=x_train,
                                  y=y_train,
                                  batch_size=self.args.BS,
                                  epochs=self.args.EP,
                                  verbose=self.args.VB,
                                  callbacks=self.callbacks,
                                  validation_data=(x_val, y_val))

            self.metrics = get_metrics(hist, reg=self.reg_flag)

            ### Save files ###
            npsave('%s/%s/Met%.4f.npy' % (self.args.metrics_dir, self.name, temp),
                   self.metrics)
            self.model.save('%s/%s/Net%.4f.h5' %
                            (self.args.model_dir, self.name, temp))

            print('Temperature %d / %d done!' % (idx + 1, total))
# Example #4
import json
import sys
import time

import architectures
import data_loaders
from train_test import train_and_test
from utils import Hp, get_optimizer, get_hyperparameters

# BUG FIX: `sys` and `json` are used below but were never imported,
# which raised NameError at runtime.

# Load all hyperparameter permutations; the list name may be given on
# the command line, otherwise a default dataset name is used.
if len(sys.argv) > 1:
    hyperparameter_list_name = sys.argv[1]
else:
    hyperparameter_list_name = 'JSB Chorales'

time_start = time.time()
for hyperparameters in get_hyperparameters(hyperparameter_list_name):
    # Run one experiment per hyperparameter setting.
    Hp.set_hyperparameters(hyperparameters)
    train_loader, test_loader = data_loaders.get_dataset()
    model = architectures.get_model()
    optimizer = get_optimizer(model)
    results = train_and_test(train_loader, test_loader, model, optimizer)

    # Save experiment in a dictionary and dump as a json
    with open(Hp.get_experiment_name(), 'w') as f:
        experiment_dict = {'hyperparameters': hyperparameters,
                           'results': results}
        json.dump(experiment_dict, f, indent=2)


# Print how long it took to run the algorithm
time_finish = time.time()
print('Runtime: ', time_finish - time_start)
    def parse_arg(self):
        """Parse CLI options and materialize run-time objects onto them.

        Returns the argparse namespace augmented with: data/log/output
        directory paths, the network (already moved to the selected
        device), the optimizer, the LR scheduler and the loss function.

        NOTE: statement order matters -- CUDA_VISIBLE_DEVICES must be set
        before any CUDA query (torch.cuda.is_available).
        """
        opt = self.parser.parse_args()
        setup_seed(opt.seed)
        # IDIOM FIX: replaced redundant `True if ... else False` ternary.
        # AMP requires torch >= 1.6.0 AND must be requested via opt.amp.
        opt.amp_available = bool(
            opt.amp and LooseVersion(torch.__version__) >= LooseVersion('1.6.0'))
        ####################################################################################################
        """ Directory """
        # Dataset inputs live under <cwd>/data/<dataset>/{image,label};
        # logs/vis/results are keyed by experiment id and architecture.
        dir_root = os.getcwd()
        opt.dir_data = os.path.join(dir_root, 'data', opt.dataset)
        opt.dir_img = os.path.join(opt.dir_data, 'image')
        opt.dir_label = os.path.join(opt.dir_data, 'label')
        opt.dir_log = os.path.join(dir_root, 'logs', opt.dataset,
                                   f"EXP_{opt.exp_id}_NET_{opt.arch}")

        opt.dir_vis = os.path.join(dir_root, 'vis', opt.dataset, opt.exp_id)
        opt.dir_result = os.path.join(dir_root, 'results', opt.dataset,
                                      opt.exp_id)

        ####################################################################################################
        """ Model Architecture """
        # Restrict visible GPUs before any CUDA call below.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpus)
        # 3 input channels, 1 output channel -- presumably RGB in,
        # single-mask out; confirm against get_model.
        opt.net = get_model(3, 1, opt.arch)
        opt.param = "%.2fM" % (sum(x.numel()
                                   for x in opt.net.parameters()) / 1e+6)
        opt.device = torch.device(
            f'cuda:{0}' if torch.cuda.is_available() else 'cpu')
        if opt.gpus is not None:
            warnings.warn(
                'You have chosen a specific GPU. This will completely '
                'disable data parallelism.')
        opt.net.to(device=opt.device)
        ####################################################################################################
        """ Optimizer """
        if opt.optim == "Adam":
            opt.optimizer = optim.Adam(opt.net.parameters(),
                                       lr=opt.lr,
                                       weight_decay=opt.l2)
        elif opt.optim == "SGD":
            opt.optimizer = optim.SGD(opt.net.parameters(),
                                      lr=opt.lr,
                                      momentum=0.9,
                                      weight_decay=opt.l2)
        ####################################################################################################
        """ Scheduler """
        # Scheduler hyper-parameters (gamma, milestones, T_max, ...) are
        # fixed here rather than exposed as CLI options.
        if opt.sche == "ExpLR":
            gamma = 0.95
            opt.scheduler = torch.optim.lr_scheduler.ExponentialLR(
                opt.optimizer, gamma=gamma, last_epoch=-1)
        elif opt.sche == "MulStepLR":
            milestones = [90, 120]
            opt.scheduler = torch.optim.lr_scheduler.MultiStepLR(
                opt.optimizer, milestones=milestones, gamma=0.1)
        elif opt.sche == "CosAnnLR":
            t_max = 5
            opt.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                opt.optimizer, T_max=t_max, eta_min=0.)
        elif opt.sche == "ReduceLR":
            mode = "max"
            factor = 0.9
            patience = 10
            opt.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                opt.optimizer, mode=mode, factor=factor, patience=patience)
        ####################################################################################################
        """ Loss Function """
        if opt.loss == "dice_loss":
            opt.loss_function = DiceLoss()
        elif opt.loss == "dice_bce_loss":
            opt.loss_function = DiceBCELoss()
        ####################################################################################################

        return opt