def _test_epoch(self, epoch, save_result=False, save_for_visual=False):
        self.model.eval()
        self.test_loss.reset()
        self.test_metrics.reset()
        self.logger.info('Test: ')
        with torch.no_grad():
            for batch_idx, (data, target, image_name) in enumerate(self.test_data_loader):
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)

                self.writer.set_step((epoch - 1) * len(self.test_data_loader) + batch_idx, 'test')
                self.test_loss.update(self.loss_name, loss.item())
                for metric in self.metrics:
                    self.test_metrics.update(metric.__name__, metric(output, target), n=output.shape[0])

                if save_result:
                    save_output(output, image_name, epoch, os.path.join(self.checkpoint_dir, 'tracker'), percent=1)

                if save_for_visual:
                    save_mask2image(output, image_name, os.path.join(self.checkpoint_dir, 'output'))
                    save_mask2image(target, image_name, os.path.join(self.checkpoint_dir, 'target'))

                if batch_idx % self.log_step == 0:
                    self.logger.debug('{}/{}'.format(batch_idx, len(self.test_data_loader)))
                    self.logger.debug('{}: {}'.format(self.loss_name, self.test_loss.avg(self.loss_name)))
                    self.logger.debug(SegmentationTrainer.get_metric_message(self.test_metrics, self.metric_names))

        log = self.test_loss.result()
        log.update(self.test_metrics.result())
        test_log = {'test_{}'.format(k): v for k, v in log.items()}
        return test_log
Example No. 2
def main():
    counter = 0
    print("Saving Images")
    if not exists(opt.output_dir):
        makedirs(opt.output_dir)
    with torch.no_grad():
        for batch in testing_data_loader:
            inimg, int1, int2, target = batch[0].to(device), batch[1].to(
                device), batch[2].to(device), batch[3].to(device)
            _, _, prediction = model(inimg, int1, int2, target)
            lowres_fname = test_set.lowres_filenames[counter]
            fname = lowres_fname[27:40]  # extract the bare file name via a fixed slice
            in_filename = opt.output_dir + str(fname)
            out_filename = opt.output_dir + 'out' + str(fname)
            tg_filename = opt.output_dir + 'tg' + str(fname)
            result_filename = 'Result_' + str(fname)
            tv.save_image(inimg, in_filename)
            tv.save_image(prediction, out_filename)
            tv.save_image(target, tg_filename)
            low_img = Image.open(in_filename).convert('L')
            out_img = Image.open(out_filename).convert('L')
            hr_img = Image.open(tg_filename).convert('L')
            out_img = np.asarray(out_img)
            low_img = np.asarray(low_img)
            high_img = np.asarray(hr_img)
            save_output(lr_img=low_img,
                        prediction=out_img,
                        hr_img=high_img,
                        path=os.path.join(opt.output_dir,
                                          '%s' % result_filename))
            os.remove(in_filename)
            os.remove(out_filename)
            os.remove(tg_filename)
            counter += 1
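
For reference, the save_output variant used here takes the three grayscale arrays and an output path. A minimal sketch of such a helper, assuming a matplotlib side-by-side layout (the real implementation is project-specific):

import matplotlib
matplotlib.use('Agg')  # headless backend for batch runs
import matplotlib.pyplot as plt

def save_output(lr_img, prediction, hr_img, path):
    # Hypothetical sketch: save input, prediction and target side by side
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    for ax, img, title in zip(axes, (lr_img, prediction, hr_img),
                              ('low-res input', 'prediction', 'ground truth')):
        ax.imshow(img, cmap='gray')
        ax.set_title(title)
        ax.axis('off')
    fig.savefig(path, bbox_inches='tight')
    plt.close(fig)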
Example No. 3
def test():
    paths = glob(join(c.TEST_DIR, '*.png'))
    imgs = run(paths)

    for i, path in enumerate(paths):
        save_path = utils.get_path(join(c.SAVE_DIR, 'test/' + basename(path)))
        utils.save_output([imgs[i]], save_path)
Example No. 4
def check_status(url, verbose=True, outfile=False):
    """Function to verify the API status.
    
    Parameters
    ----------
    url : str
        The endpoint to verify the API status.
    verbose : bool
        A flag to print the text output.
    outfile : bool
        A flag to save the text output into a JSON file.
    
    Returns
    -------
    response : dict
        The response as a JSON object.
    """
    
    # Using the requests library (assumed imported at module level):
    r = requests.get(url)
    response = r.json()
    
    if outfile:
        save_output(r.text, 'check_status.json')
    
    if verbose:
        print(r.text)

    return response
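
A hedged usage sketch for check_status; the URL below is a placeholder, not an endpoint from the source:

if __name__ == '__main__':
    status = check_status('http://localhost:5000/status', verbose=True, outfile=False)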
Example No. 5
    def _train_epoch(self, epoch):
        self.model.train()
        self.trained_model.eval()
        self.prepare_train_epoch(epoch)
        for batch_idx, (data, target,
                        image_name) in enumerate(self.train_data_loader):
            data, target = data.to(self.device), target.to(self.device)
            noise = self.model(data)
            # TODO: control noise
            noise_clamped = inf_norm_adjust(noise, self.noise_epsilon)
            noise_input = noise_clamped + data
            # TODO: control noise input
            noise_input_clamped = torch.clamp(noise_input, self.range_input[0],
                                              self.range_input[1])
            output = self.trained_model(noise_input_clamped)
            # reverse_nll_loss = self.call_loss(data, target, output)
            loss = self.cal_loss(data,
                                 target,
                                 output,
                                 loss_type=self.config['trainer']['loss_type'])
            # Debug aid: save a checkpoint and stop if the loss becomes NaN
            if torch.isnan(loss):
                super()._save_checkpoint(epoch)
                sys.exit(0)

            self.model.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update train loss, metrics
            self.train_loss.update(self.loss_name, loss.item())
            for metric in self.metrics:
                self.train_metrics.update(metric.__name__,
                                          metric(output, target),
                                          n=output.shape[0])

            if batch_idx % self.log_step == 0:
                self.log_for_step(epoch, batch_idx)

            if self.save_for_track and (batch_idx % self.save_for_track == 0):
                save_output(output, image_name, epoch, self.checkpoint_dir)

            if batch_idx == self.len_epoch:
                break

        log = self.train_loss.result()
        log.update(self.train_metrics.result())

        if self.do_validation and (epoch % self.do_validation_interval == 0):
            val_log = self._valid_epoch(epoch)
            log.update(val_log)

        # step lr scheduler
        if isinstance(self.lr_scheduler, MyReduceLROnPlateau):
            self.lr_scheduler.step(self.valid_loss.avg(self.loss_name))

        return log
Example No. 6
    def _valid_epoch(self, epoch, save_result=False, save_for_visual=False):
        self.trained_model.eval()
        self.model.eval()
        self.valid_loss.reset()
        self.valid_metrics.reset()
        self.logger.info('Validation: ')
        with torch.no_grad():
            for batch_idx, (data, target,
                            image_name) in enumerate(self.valid_data_loader):
                data, target = data.to(self.device), target.to(self.device)
                noise = self.model(data)
                # TODO: control noise
                noise_clamped = inf_norm_adjust(noise, self.noise_epsilon)
                noise_input = noise_clamped + data
                # TODO: control noise input
                noise_input_clamped = torch.clamp(noise_input,
                                                  self.range_input[0],
                                                  self.range_input[1])
                output = self.trained_model(noise_input_clamped)
                loss = (-1) * self.criterion(output, target)
                self.writer.set_step(
                    (epoch - 1) * len(self.valid_data_loader) + batch_idx,
                    'valid')
                self.valid_loss.update(self.loss_name, loss.item())
                for metric in self.metrics:
                    self.valid_metrics.update(metric.__name__,
                                              metric(output, target),
                                              n=output.shape[0])

                if save_result:
                    save_output(output,
                                image_name,
                                epoch,
                                os.path.join(self.checkpoint_dir, 'tracker'),
                                percent=1)

                if save_for_visual:
                    save_mask2image(
                        output, image_name,
                        os.path.join(self.checkpoint_dir, 'output'))
                    save_mask2image(
                        target, image_name,
                        os.path.join(self.checkpoint_dir, 'target'))

                if batch_idx % self.log_step == 0:
                    self.logger.debug('{}/{}'.format(
                        batch_idx, len(self.valid_data_loader)))
                    self.logger.debug('{}: {}'.format(
                        self.loss_name, self.valid_loss.avg(self.loss_name)))
                    self.logger.debug(
                        SegmentationTrainer.get_metric_message(
                            self.valid_metrics, self.metric_names))

        log = self.valid_loss.result()
        log.update(self.valid_metrics.result())
        val_log = {'val_{}'.format(k): v for k, v in log.items()}
        return val_log
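
inf_norm_adjust is not included in this snippet; from the call sites it plausibly projects the generated noise back into an L-infinity ball of radius noise_epsilon. A sketch under that assumption:

import torch

def inf_norm_adjust(noise, epsilon):
    # Hypothetical: clamp elementwise so that ||noise||_inf <= epsilon
    return torch.clamp(noise, min=-epsilon, max=epsilon)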
Example No. 7
def model_predictions(folder, start, end):
    batch_siz = 250
    iterator_count = int(((end - (start - 1)) / batch_siz)) + 1
    for j in range(1, iterator_count):
        print("Batch ", j, " started..")
        jstart = (j - 1) * batch_siz + start
        jend = j * batch_siz + (start - 1)
        # file_name, model and output_dir come from the enclosing scope
        inputs = load_images(file_name, folder, jstart, jend)
        inputs = scale_up(2, inputs)
        outputs = predict(model, inputs, batch_size=64)
        save_output(outputs, output_dir, folder, jstart, jend)
    return True
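
A quick sanity check of the chunk arithmetic with assumed values start=1, end=500; note that a trailing partial batch is silently dropped when the range is not a multiple of batch_siz:

start, end, batch_siz = 1, 500, 250
iterator_count = int((end - (start - 1)) / batch_siz) + 1  # == 3
chunks = [((j - 1) * batch_siz + start, j * batch_siz + (start - 1))
          for j in range(1, iterator_count)]
assert chunks == [(1, 250), (251, 500)]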
Example No. 8
def run_testing(session, config=FLAGS):
    files = get_tfrecord_files(config)
    logging.info('Total number of files  %d' % len(files))

    dataset = tf.data.TFRecordDataset(files, buffer_size=10000)
    dataset = dataset.map(parse_function)
    dataset = dataset.batch(1)
    iterator = dataset.make_one_shot_iterator()
    tf_next_element = iterator.get_next()

    (tf_lr_image, tf_hr_image_tensor, _) = tf_next_element
    tf_re_image = tf.image.resize_images(tf_lr_image, [FLAGS.image_size, FLAGS.image_size])
    tf_initial_mse = tf.losses.mean_squared_error(tf_hr_image_tensor, tf_re_image)
    tf_initial_rmse = tf.sqrt(tf_initial_mse)
    tf_initial_psnr = tf_psnr(tf_initial_mse)
    tf_initial_ssim = tf_ssim(tf_hr_image_tensor, tf_re_image)

    tf_prediction = srcnn(tf_lr_image, FLAGS.image_size)
    tf.global_variables_initializer().run()

    predicted_mse = tf.losses.mean_squared_error(tf_hr_image_tensor, tf_prediction)
    predicted_rmse = tf.sqrt(predicted_mse)
    predicted_psnr = tf_psnr(predicted_mse)
    predicted_ssim = tf_ssim(tf_hr_image_tensor, tf_prediction)

    load(session, config.checkpoint_dir)

    params_file = open('metrics.csv', 'w+')
    writer = csv.writer(params_file)
    writer.writerows([['filename', 'initial_rmse', 'rmse', 'initial_psnr', 'psnr', 'initial_ssim', 'ssim']])

    while True:
        try:
            tf_initial_params = [tf_initial_rmse, tf_initial_psnr, tf_initial_ssim]
            tf_predicted_params = [predicted_rmse, predicted_psnr, predicted_ssim]
            next_element, re_image, prediction, initial_params, predicted_params = session.run([tf_next_element, tf_re_image, tf_prediction, tf_initial_params, tf_predicted_params])
            (lr_image, hr_image, name) = next_element
            (initial_rmse, initial_psnr, initial_ssim) = initial_params
            (rmse, psnr, ssim) = predicted_params
            prediction = np.squeeze(prediction)
            name = name[0].decode('utf-8')
            logging.info('Enhance resolution for %s' % name)
            writer.writerows([[name, initial_rmse, rmse, initial_psnr, psnr, initial_ssim, ssim]])
            save_image(image=prediction, path=os.path.join(config.output_dir, PREDICTION, '%s.jpg' % name))
            save_image(image=re_image, path=os.path.join(config.output_dir, LOW_RESOLUTION, '%s.jpg' % name))
            save_image(image=hr_image, path=os.path.join(config.output_dir, HIGH_RESOLUTION, '%s.jpg' % name))
            save_output(lr_img=re_image, prediction=prediction, hr_img=hr_image, path=os.path.join(config.output_dir, '%s.jpg' % name))
        except tf.errors.OutOfRangeError:
            # the one-shot iterator is exhausted; this is the normal exit
            logging.info('Reached end of dataset')
            break

    params_file.close()
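
tf_psnr is not defined in this excerpt; the conventional definition for images normalized to [0, 1] is PSNR = -10 * log10(MSE). A sketch under that assumption, in the same TF1 style as the snippet:

import tensorflow as tf

def tf_psnr(mse):
    # Hypothetical helper: PSNR in dB for images normalized to [0, 1]
    return -10.0 * tf.log(mse) / tf.log(10.0)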
Example No. 9
def transform_img(img_path, out_dir, img_size):
    img_np = preprocess_img(get_img(img_path, size=img_size))
    img_np = np.expand_dims(img_np, 0)

    # generator
    gen = get_module(img_np.shape, ctx)
    gen.load_params(args.checkpoint)

    data = mx.nd.array(img_np)
    gen.forward(mx.io.DataBatch([data], [0]), is_train=False)

    save_file = os.path.basename(os.path.normpath(img_path))
    save_output(gen, os.path.join(out_dir, save_file))
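
This save_output variant receives the module itself; presumably it pulls the forward outputs and writes them as an image. A sketch under that assumption (the NCHW layout and value range are guesses):

import numpy as np
from PIL import Image

def save_output(gen, path):
    # Hypothetical: take the first forward output (NCHW) and save it as an image
    out = gen.get_outputs()[0].asnumpy()
    img = np.clip(out[0].transpose(1, 2, 0), 0, 255).astype(np.uint8)
    Image.fromarray(img).save(path)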
Example No. 10
def main(args):
    logging.basicConfig(level=logging.getLevelName(args.logging))
    logging.info('User args: %s' % pformat(args))
    config = load_config_from_json_file(
        args.config,
        ['gap', 'same', 'diff', 'max_number_of_paths', 'max_sequence_length'])
    logging.info('Config is: \n%s' % pformat(config))

    seq1 = load_fasta_file(args.input1)
    seq2 = load_fasta_file(args.input2)
    if config['max_sequence_length'] != 0 and max(
            len(seq1), len(seq2)) > config['max_sequence_length']:
        raise ValueError('Sequence exceeded max_sequence_length')

    score_matrix, nodes_mapping = solve(seq1, seq2, config['gap'],
                                        config['diff'], config['same'])

    logging.debug('Score matrix: \n%s' % pformat(score_matrix))
    logging.debug('Nodes mapping: (target_node): [(parent_node),...]\n%s' %
                  pformat(nodes_mapping))
    logging.info('Alignments score: %s' % score_matrix[len(seq1), len(seq2)])

    paths = PathResolver(nodes_mapping).resolve_paths(
        len(seq1), len(seq2), config['max_number_of_paths'])

    allignments = [get_allignments(path, seq1, seq2) for path in paths]

    for i, (allignment_1, allignment_2) in enumerate(allignments):
        logging.info('[A%04d] %s' % (i, allignment_1))
        logging.info('[A%04d] %s' % (i, allignment_2))

    if args.output:
        save_output(
            args.output, {
                'seq1': seq1,
                'seq2': seq2,
                'config': config,
                'allignments': allignments,
                'score_matrix': score_matrix.tolist()
            })
        logging.info('Saved output to %s' % args.output)
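
For reference, a hypothetical config file matching the keys load_config_from_json_file expects; the numeric values are assumptions, not the project's defaults:

import json

config = {"gap": -2, "same": 1, "diff": -1,
          "max_number_of_paths": 10, "max_sequence_length": 0}
with open("config.json", "w") as f:
    json.dump(config, f, indent=2)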
Example No. 11
    def _train_epoch(self, epoch):
        self.model.train()
        self.prepare_train_epoch(epoch)
        for batch_idx, (data, target, image_name) in enumerate(self.train_data_loader):
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            loss = self.criterion(output, target)
            # Debug aid: save a checkpoint if the loss becomes NaN
            if torch.isnan(loss):
                super()._save_checkpoint(epoch)

            self.model.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update train loss, metrics
            self.train_loss.update(self.loss_name, loss.item())
            for metric in self.metrics:
                self.train_metrics.update(metric.__name__, metric(output, target), n=output.shape[0])

            if batch_idx % self.log_step == 0:
                self.log_for_step(epoch, batch_idx)

            if self.save_for_track and (batch_idx % self.save_for_track == 0):
                save_output(output, image_name, epoch, self.checkpoint_dir)

            if batch_idx == self.len_epoch:
                break

        log = self.train_loss.result()
        log.update(self.train_metrics.result())

        if self.do_validation and (epoch % self.do_validation_interval == 0):
            val_log = self._valid_epoch(epoch)
            log.update(val_log)

        # step lr scheduler
        if isinstance(self.lr_scheduler, MyReduceLROnPlateau):
            self.lr_scheduler.step(self.valid_loss.avg(self.loss_name))

        return log
Example No. 12
def predict_emotion(img_path, url, verbose=True, outfile=False):
    """Function to call the emotion predictor API.
    
    Parameters
    ----------
    img_path : str
        The path to the image to be processed.
    url : str
        The endpoint to verify the API status.
    verbose : bool
        A flag to print the text output.
    outfile : bool
        A flag to save the text output into a JSON file.
    
    Returns
    -------
    response : dict
        The response as a JSON object.
    """
    
    # image_encoder() is assumed to be defined elsewhere in the module:
    encoded_img = image_encoder(img_path)

    data = {
        "image": encoded_img
    }
    
    # POST the JSON payload with the requests library (assumed imported):
    r = requests.post(url, json=data)
    response = r.json()
    
    if outfile:
        save_output(r.text, 'predict_emotion.json')
    
    if verbose:
        print(r.text)

    return response
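
image_encoder is not shown; for an image to travel inside a JSON payload it is typically base64-encoded. A sketch under that assumption:

import base64

def image_encoder(img_path):
    # Hypothetical helper: read the image bytes and base64-encode them as text
    with open(img_path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')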
Example No. 13
    # open image as numpy array
    image = np.array(Image.open(input_img_path).convert(mode='L'))
    # gaussian blur
    blur_image = gaussian_blur(image,
                               kernel_size=int(args["kernel"]),
                               sigma=float(args["sigma"]),
                               verbose=False)

    # sobel
    sobel_x, sobel_y, sobel, _ = sobel_edge_detection(blur_image,
                                                      convert_to_degree=False)

    # prewitt
    prewitt_x, prewitt_y, prewitt = prewitt_edge_detection(blur_image)

    # canny
    # 1D Gaussian mask
    image_1D = gaussian_blur(image,
                             kernel_size=9,
                             sigma=float(args["sigma"]),
                             verbose=False)
    nms, threshold, canny = canny_edge_detection(image_1D)

    # save results
    save_output(folder_name, sobel_x, sobel_y, sobel, prewitt_x, prewitt_y,
                prewitt, nms, threshold, canny)
    # save_sobel(folder_name, sobel_x, sobel_y, sobel)
    # save_prewitt(folder_name, prewitt_x, prewitt_y, prewitt)
    # save_canny(folder_name, nms, threshold, canny)
Example No. 14
def main():
    #move_by_dates(args)
    move_by_dates_101(args)
    save_output(console, "log/console.json")
Example No. 15
def main():
    global args, output_directory, train_csv, test_csvs, mm_scaler
    # MinMax-Scaler!
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # evaluation mode
    start_epoch = 0
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no best model found at '{}'".format(args.evaluate)
        print("=> loading best model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        output_directory = os.path.dirname(args.evaluate)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        _, val_loader = create_data_loaders(args, mm_scaler)
        args.evaluate = True
        validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
        return

    # optionally resume from a checkpoint
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(chkpt_path), \
            "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(args, mm_scaler)
        args.resume = True

    # create new model
    else:
        train_loader, val_loader = create_data_loaders(args, mm_scaler)
        print("=> creating Model ({}) ...".format(args.arch))
        from models.rnn_model import Model
        if args.arch == 'LSTM':
            model = Model(input_dim=args.x_dim,
                          hidden_dim=args.hidden_size,
                          Y_target=args.y_target,
                          model_type="lstm")
        elif args.arch == 'GRU':
            model = Model(input_dim=args.x_dim,
                          hidden_dim=args.hidden_size,
                          Y_target=args.y_target,
                          model_type="gru")
        elif args.arch == 'RNN':
            model = Model(input_dim=args.x_dim,
                          hidden_dim=args.hidden_size,
                          Y_target=args.y_target,
                          model_type="rnn")
        else:
            raise ValueError('Unknown architecture: {}'.format(args.arch))
        print("=> model created.")

        model_parameters = list(model.parameters())
        params = sum([np.prod(p.size()) for p in model_parameters])
        print("Num. of parameters: ", params)

        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()

    criterion = nn.MSELoss().cuda()
    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csvs = []
    for i in range(NUM_VAL_CSVS):
        test_csv_name = 'test_' + str(i) + '.csv'
        test_csv_each = os.path.join(output_directory, test_csv_name)
        test_csvs.append(test_csv_each)
    test_csv_total = os.path.join(output_directory, 'test.csv')
    test_csvs.append(test_csv_total)

    # 1 indicates total
    assert NUM_VAL_CSVS + 1 == len(test_csvs), "Something's wrong!"

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        for test_csv in test_csvs:
            with open(test_csv, 'w') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()

    best_rmse = float('inf')

    print("=> Learning start.")
    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr, args.decay_rate,
                                   args.decay_step)
        print("=> On training...")
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        if epoch % args.validation_interval == 0:
            print("=> On validating...")
            result_rmse, results_list = validate(
                val_loader, model, epoch)  # evaluate on validation set
            # Save validation results
            print("=> On drawing results...")
            pngname = os.path.join(
                output_directory,
                str(epoch).zfill(2) + "_" + str(round(result_rmse, 5)) +
                ".png")
            utils.plot_trajectory(pngname, results_list[:-1])
            is_best = best_rmse > result_rmse
            if is_best:
                best_rmse = result_rmse
                best_name = os.path.join(output_directory, "best.csv")
                with open(best_name, 'w', newline='') as csvfile:
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    for result_container in results_list:
                        avg = result_container.result
                        writer.writerow({
                            'rmse': avg.rmse,
                            'mean': avg.mean,
                            'median': avg.median,
                            'var': avg.var,
                            'max': avg.error_max
                        })

                    writer.writerow({
                        'rmse': epoch,
                        'mean': 0,
                        'median': 0,
                        'var': 0,
                        'max': 0
                    })

                utils.save_output(results_list, epoch, output_directory)
                utils.save_checkpoint(
                    {
                        'args': args,
                        'epoch': epoch,
                        'arch': args.arch,
                        'model': model,
                        'optimizer': optimizer,
                        'scaler': mm_scaler
                    }, is_best, epoch, output_directory)
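
utils.adjust_learning_rate is not included here; a typical step-decay form consistent with the (optimizer, epoch, lr, decay_rate, decay_step) call site, given as an assumption:

def adjust_learning_rate(optimizer, epoch, lr_init, decay_rate, decay_step):
    # Hypothetical: decay the learning rate by decay_rate every decay_step epochs
    lr = lr_init * (decay_rate ** (epoch // decay_step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr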
Example No. 16
def print_usage():
    print('Usage:')
    print('(-p / --path=) <path/to/image/or/video> (Required.)')
    print('(-T / --test)  (Boolean flag. Whether to run the test function instead of a normal run.)')


if __name__ == "__main__":
    paths = None

    try:
        opts, _ = getopt.getopt(sys.argv[1:], 'p:T', ['paths=', 'test'])
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-p', '--paths'):
            paths = [arg]
        if opt in ('-T', '--test'):
            test()
            sys.exit(2)

    if paths is None:
        print_usage()
        sys.exit(2)

    # Currently only works for videos
    imgs_processed = run(paths)
    save_path = utils.get_path(join(c.SAVE_DIR, basename(paths[0])))
    utils.save_output(imgs_processed, save_path)
Example No. 17
             session.run([encoder_loss, l_ae, l_reg_z, l_reg_zr_ng, l_reg_zpp_ng, generator_loss, l_ae2, l_reg_zr, l_reg_zpp],
                         feed_dict={encoder_input: x, reconst_latent_input: z_x, sampled_latent_input: z_p})
            print('Epoch: {}/{}, iteration: {}/{}'.format(
                epoch + 1, args.nb_epoch, iteration + 1, iterations))
            print(
                ' Enc_loss: {}, l_ae:{},  l_reg_z: {}, l_reg_zr_ng: {}, l_reg_zpp_ng: {}'
                .format(enc_loss_np, enc_l_ae_np, l_reg_z_np, l_reg_zr_ng_np,
                        l_reg_zpp_ng_np))
            print(' Dec_loss: {}, l_ae:{}, l_reg_zr: {}, l_reg_zpp: {}'.format(
                generator_loss_np, dec_l_ae_np, l_reg_zr_np, l_reg_zpp_np))

        if ((global_iters % iterations_per_epoch == 0) and args.save_latent):
            utils.save_output(
                session, args.prefix, epoch, global_iters, args.batch_size,
                OrderedDict({encoder_input: test_next}),
                OrderedDict({
                    "test_mean": z_mean,
                    "test_log_var": z_log_var
                }), args.test_size)
            utils.save_output(
                session, args.prefix, epoch, global_iters, args.batch_size,
                OrderedDict({encoder_input: fixed_next}),
                OrderedDict({
                    "train_mean": z_mean,
                    "train_log_var": z_log_var
                }), args.latent_cloud_size)

            n_x = 5
            n_y = args.batch_size // n_x
            print('Save original images.')
            utils.plot_images(np.transpose(x, (0, 2, 3, 1)),
Example No. 18
def SA(requests, obavljene_zamjene, sts, ag, overlaps, limits, students, args, start, T_0=3, alpha=0.96):
    award_activity = list(map(int,args.award_activity.split(",")))
    minmax_penalty = args.minmax_penalty
    award_student = args.award_student
    timeout = args.timeout
    T = T_0
    s_best = Solution(limits=pickle.loads(pickle.dumps(limits)), sts=pickle.loads(pickle.dumps(sts)),
                      obavljene_zamjene=obavljene_zamjene.copy(), students=students, penalty=minmax_penalty, award_activity=award_activity, award_student=award_student)
    current_solution = copy.deepcopy(s_best)
    best_fitness = s_best.fitness()
    current_fitness = best_fitness
    milenial_fitness = best_fitness
    forbidden = []
    i = 0
    while True:
        flag = True  # becomes False once a request is successfully removed
        s_prime = pickle.loads(pickle.dumps(current_solution))
        s = None
        a = None
        g = None

        if time.time()-start > (timeout+20):
            if timeout == 600:
                save_output(s_best, args.dir, timeout, args.dir + args.students_file)
                timeout = 1800
            elif timeout == 1800:
                save_output(s_best, args.dir, timeout, args.dir + args.students_file)
                timeout = 3600
            else:
                break

        final = current_solution.scoreA()
        for _ in range(final):

            s, a, g = random.choice(s_prime.obavljene_zamjene)
            initial_group = s_prime.sts[s].new_groups[a]

            if legal_request(s, a, initial_group, s_prime.sts, s_prime.limits, ag, overlaps):
                flag = False
                forbidden.append((s,g))
                s_prime.limits[g].remove()
                s_prime.limits[initial_group].add()

                s_prime.obavljene_zamjene.remove((s,a,g))

                s_prime.sts[s].activities[a] = initial_group
                s_prime.sts[s].new_groups[a] = None
                break
        if flag:  # if we can't remove any sub, break the loop
            break
        alg_iteration(forbidden, s_prime.obavljene_zamjene, s_prime.limits, s_prime.sts, requests, ag, overlaps)
        prime_fitness = s_prime.fitness()

        if prime_fitness >= current_fitness:
            forbidden = []
            current_solution = s_prime
            current_fitness = prime_fitness
        elif random.uniform(0,1) < probability(T):
            current_solution = s_prime
            current_fitness = prime_fitness
        else:
            forbidden = []

        if current_fitness > best_fitness:
            s_best = current_solution
            best_fitness = current_fitness
        if i % 1500 == 0:
            if milenial_fitness == best_fitness:
                T = T_0
            else:
                milenial_fitness = best_fitness

        if i%1000 == 0:
            shuffle(requests)
        if i%200 == 0:
            T = T*alpha
        i += 1


    print("End of algoritm - Number of evaluation calls:", Solution.counter, ", Final fitness:", best_fitness)
    return s_best
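
probability(T) is not defined in this excerpt. Classic simulated annealing accepts a worse solution with probability exp(-delta / T); since this call site passes only the temperature, a temperature-only variant such as the following would fit, given as an assumption:

import math

def probability(T, delta=1.0):
    # Hypothetical acceptance probability with a fixed penalty scale delta
    return math.exp(-delta / T)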
Example No. 19
def save_metrics(metrics: pd.DataFrame, all_fitness: list,
                 all_populations: list, config: dict):
    """
    Method for saving metrics
    :param metrics:         values of metrics
    :param all_fitness:     values of all fitnesses
    :param all_populations: all populations.
    :param config:          experiment configuration
    :return:
    """
    current_time = config["experiment_time"]

    save_output(file=metrics,
                file_name=f'results_metrics_{current_time}',
                extension='csv',
                config=config)
    save_output(file=config,
                file_name=f'config_{current_time}',
                extension='txt',
                config=config)
    if config['save_every_iteration']:
        save_output(file=all_fitness,
                    file_name=f'fitness_all_{current_time}',
                    extension='txt',
                    config=config)
        save_output(file=all_populations,
                    file_name=f'populations_all_{current_time}',
                    extension='txt',
                    config=config)
    if config['save_only_last_iteration'] and not config['save_every_iteration']:
        save_output(file=all_fitness[-1],
                    file_name=f'fitness_{current_time}',
                    extension='txt',
                    config=config)
        save_output(file=all_populations[-1],
                    file_name=f'population_{current_time}',
                    extension='txt',
                    config=config)
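
A hedged usage sketch for save_metrics; the config keys mirror the ones the function reads, and every value is a placeholder:

import pandas as pd

config = {"experiment_time": "2024-01-01_12-00-00",
          "save_every_iteration": False,
          "save_only_last_iteration": True}
metrics = pd.DataFrame({"generation": [0, 1], "best_fitness": [0.42, 0.57]})
save_metrics(metrics, all_fitness=[[0.42], [0.57]],
             all_populations=[[[0, 1, 1]], [[1, 1, 0]]], config=config)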
Example No. 20
    # Convert each string element to a float and drop any empty rows
    numerized_data = [[float(i) for i in row]
                      for row in unlabeled_data if len(row) > 0]

    # store SSEs for post-analysis
    print("\nTESTING ON K = {}".format(INPUT_K))

    # Run k means on the numerized data
    centroids, results, iterations = k_means(INPUT_K, numerized_data)

    # Grab the labels in order
    predicted_labels = [r.current_centroid.cluster for r in results]

    # save the output
    save_output(predicted_labels, OUTPUT_FILE)

    # convenience print statements
    print "\nFinal centroids:"

    for centroid in [centroid.data for centroid in centroids]:
        print(centroid)

    sse = calc_sse(centroids, results)

    print "\nSSE: ", sse
    print "total iterations: ", iterations

    # FOR TESTING ON DIFFERENT VARIATIONS OF K
    # for i in range(2,6):
    #
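
calc_sse is not shown; the usual sum of squared distances from each point to its assigned centroid would look roughly like this, assuming each result exposes .data alongside .current_centroid:

def calc_sse(centroids, results):
    # Hypothetical: total squared distance of each point to its assigned
    # centroid; centroids is kept only to match the call site
    return sum(
        sum((x - c) ** 2 for x, c in zip(r.data, r.current_centroid.data))
        for r in results
    )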
Example No. 21
# mylist = [f for f in glob.glob(input_dir)]
mylist = []
for count in range(1, 101):
  zip_file_name = "/content/drive/My Drive/Utils/S15A/Images/fg_bg" + str(count) + ".zip"
  my_dict = {"bg_number": count, "zip_file_name" : zip_file_name}
  mylist.append(my_dict)

print("My_List", mylist[start:end])

for file_name in mylist[start:end]:
  print(file_name, start, end)
  print("Bg Loop",datetime.datetime.now())
  with zipfile.ZipFile(file_name["zip_file_name"], 'r') as zip_in:  # zip_in avoids shadowing the builtin zip
    t_start = timeit.default_timer()  # t_start avoids shadowing the slice bound `start`
    print("start", t_start)
    file_list = zip_in.namelist()
    new_zip_name = file_name["bg_number"]
    dense_depth_zip = zipfile.ZipFile(output_dir + f'/fb_bg_depth{new_zip_name}.zip', mode='a', compression=zipfile.ZIP_STORED)
    for i in range(0, 4000, 50):
      snipped_list = file_list[i:i + 50]
      inputs = load_zip_images(zip_in, snipped_list)
      inputs = scale_up(2, inputs)
      outputs = predict(model, inputs)
      save_output(outputs, output_dir, snipped_list, dense_depth_zip, is_rescale=True)

    stop = timeit.default_timer()
    execution_time = stop - t_start
    dense_depth_zip.close()
    print("Program Executed in "+str(execution_time))


Example No. 22
                        l_reg_zpp_ng_np, lr_np))
            print(' Dec_loss: {}, l_ae:{}, l_reg_zr: {}, l_reg_zpp: {}, lr={}'.
                  format(generator_loss_np, dec_l_ae_np, l_reg_zr_np,
                         l_reg_zpp_np, lr_np))
            print(
                ' Disc_loss: {}, l_reg_zd: {}, l_reg_zr_ng: {}, l_reg_zpp_ng: {}'
                .format(disc_loss_np, l_reg_zd_np, l_reg_zr_ng_np,
                        l_reg_zpp_ng_np))

        if ((global_iters % iterations_per_epoch == 0) and args.save_latent):
            _ = session.run([test_iterator_init_op_a, test_iterator_init_op_b])
            _ = utils.save_output(
                session, '_'.join([args.prefix, args.dataset]), epoch,
                global_iters, args.batch_size,
                OrderedDict({encoder_input: fixed_next}),
                OrderedDict({
                    "train_mean": z_mean,
                    "train_log_var": z_log_var,
                    "train_reconstloss": reconst_loss
                }), args.latent_cloud_size)
            a_result_dict = utils.save_output(
                session, '_'.join([args.prefix, args.test_dataset_a]), epoch,
                global_iters, args.batch_size,
                OrderedDict({encoder_input: test_next_a}),
                OrderedDict({
                    "test_a_mean": z_mean,
                    "test_a_log_var": z_log_var,
                    "test_a_reconstloss": reconst_loss
                }), test_size_a, args.augment_avg_at_test, args.original_shape)
            b_result_dict = utils.save_output(
                session, '_'.join([args.prefix, args.test_dataset_b]), epoch,