Example #1
	def parse(self):
		if not self.initialized:
			self.initialize()
		self.opt = self.parser.parse_args()
		self.opt.mode = self.mode

		str_ids = self.opt.gpu_ids.split(',')
		self.opt.gpu_ids = []
		for str_id in str_ids:
			gpu_id = int(str_id)  # avoid shadowing the built-in id()
			if gpu_id >= 0:
				self.opt.gpu_ids.append(gpu_id)

		# set gpu ids
		if len(self.opt.gpu_ids) > 0:
			torch.cuda.set_device(self.opt.gpu_ids[0])

		# process the options here (gpu ids, etc.)
		args = vars(self.opt)
		print('------------ Options -------------')
		for k, v in sorted(args.items()):
			print('%s: %s' % (str(k), str(v)))
		print('-------------- End ----------------')

		# save to the disk
		expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
		utils.mkdirs(expr_dir)
		file_name = os.path.join(expr_dir, 'opt.txt')
		with open(file_name, 'wt') as opt_file:
			opt_file.write('------------ Options -------------\n')
			for k, v in sorted(args.items()):
				opt_file.write('%s: %s\n' % (str(k), str(v)))
			opt_file.write('-------------- End ----------------\n')
		return self.opt
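All of the snippets on this page rely on a small mkdirs helper defined in their own repositories. Its signature varies from repo to repo (a single path, a list of paths, or several positional paths as in Examples 3 and 5). A minimal sketch covering those call styles, assuming only the standard library:

import os

def mkdirs(*paths):
    """Create each directory (and its parents) if it does not already exist.

    Accepts several positional paths or a single list/tuple of paths,
    matching the different call styles seen on this page.
    """
    if len(paths) == 1 and isinstance(paths[0], (list, tuple)):
        paths = paths[0]
    for path in paths:
        os.makedirs(path, exist_ok=True)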
Example #2
    def __init__(self, args) -> None:
        self.lr = args.learning_rate
        self.LAMBDA = args.LAMBDA
        self.save = args.save
        self.batch_size = args.batch_size
        self.path = args.path
        self.n_epochs = args.epoch_num
        self.eval_interval = 10
        self.G_image_loss = []
        self.G_GAN_loss = []
        self.G_total_loss = []
        self.D_loss = []
        self.netG = Generator().to("cuda")
        self.netD = Discriminator().to("cuda")
        self.optimizerG = flow.optim.Adam(self.netG.parameters(),
                                          lr=self.lr,
                                          betas=(0.5, 0.999))
        self.optimizerD = flow.optim.Adam(self.netD.parameters(),
                                          lr=self.lr,
                                          betas=(0.5, 0.999))
        self.criterionGAN = flow.nn.BCEWithLogitsLoss()
        self.criterionL1 = flow.nn.L1Loss()

        self.checkpoint_path = os.path.join(self.path, "checkpoint")
        self.test_images_path = os.path.join(self.path, "test_images")

        mkdirs(self.checkpoint_path, self.test_images_path)
        self.logger = init_logger(os.path.join(self.path, "log.txt"))
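Generator, Discriminator, mkdirs and init_logger come from the surrounding project. As a rough idea only, a hypothetical minimal init_logger built on the standard logging module (the real helper may differ):

import logging

def init_logger(log_path):
    # Hypothetical sketch: log to stdout and append to log_path.
    logger = logging.getLogger("train")
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())
    logger.addHandler(logging.FileHandler(log_path))
    return logger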
Example #3
    def parse(self, dirs=True):
        '''
        Parse program arguments
        Args:
            dirs (boolean): True to make file directories for predictions and models
        '''
        args = self.parse_args()
        
        args.run_dir = args.exp_dir + '/' + args.exp_name \
            + '/ntrain{}_batch{}_lr{}_nic{}'.format(
                args.ntrain, args.batch_size, args.lr, args.nic)

        args.ckpt_dir = args.run_dir + '/checkpoints'
        if dirs:
            mkdirs(args.run_dir, args.ckpt_dir)
        
        assert args.epochs % args.ckpt_freq == 0, 'epochs must '\
            'be divisible by ckpt_freq'

        # Set random seed
        if args.seed is None:
            args.seed = random.randint(1, 10000)
        print("Random Seed: ", args.seed)
        random.seed(args.seed)
        torch.manual_seed(args.seed)

        print('Arguments:')
        pprint(vars(args))

        return args
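Note that the snippet seeds only Python's random module and Torch's CPU generator. If GPU reproducibility were also wanted, one would additionally seed CUDA, e.g.:

import torch

if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)  # args.seed as set above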
Example #4
def display_val(model, crit, writer, index, dataset_val, opt):
    # remove previous viz results
    save_dir = os.path.join('.', opt.checkpoints_dir, opt.name,
                            'visualization')
    utils.mkdirs(save_dir)

    # initialize results lists
    accuracies = []
    classifier_losses = []
    coseparation_losses = []

    # initialize HTML header
    visualizer = viz.HTMLVisualizer(os.path.join(save_dir, 'index.html'))
    header = ['Filename', 'Input Mixed Audio']
    header += [
        'Predicted Audio', 'GroundTruth Audio', 'Predicted Mask',
        'GroundTruth Mask', 'Loss weighting'
    ]
    visualizer.add_header(header)
    vis_rows = []

    with torch.no_grad():
        for i, val_data in enumerate(dataset_val):
            if i < opt.validation_batches:
                output = model.forward(val_data)
                loss_classification = crit['loss_classification']
                classifier_loss = loss_classification(
                    output['pred_label'],
                    Variable(output['gt_label'],
                             requires_grad=False)) * opt.classifier_loss_weight
                coseparation_loss = get_coseparation_loss(
                    output, opt, crit['loss_coseparation'])
                classifier_losses.append(classifier_loss.item())
                coseparation_losses.append(coseparation_loss.item())
                gt_label = output['gt_label']
                _, pred_label = torch.max(output['pred_label'], 1)
                accuracy = torch.sum(
                    gt_label == pred_label).item() * 1.0 / pred_label.shape[0]
                accuracies.append(accuracy)
            else:
                if opt.validation_visualization:
                    output = model.forward(val_data)
                    save_visualization(vis_rows, output, val_data, save_dir,
                                       opt)  #visualize one batch
                break

    avg_accuracy = sum(accuracies) / len(accuracies)
    avg_classifier_loss = sum(classifier_losses) / len(classifier_losses)
    avg_coseparation_loss = sum(coseparation_losses) / len(coseparation_losses)
    if opt.tensorboard:
        writer.add_scalar('data/val_classifier_loss', avg_classifier_loss,
                          index)
        writer.add_scalar('data/val_accuracy', avg_accuracy, index)
        writer.add_scalar('data/val_coseparation_loss', avg_coseparation_loss,
                          index)
    print('val accuracy: %.3f' % avg_accuracy)
    print('val classifier loss: %.3f' % avg_classifier_loss)
    print('val coseparation loss: %.3f' % avg_coseparation_loss)
    return avg_coseparation_loss + avg_classifier_loss
Example #5
    def parse(self, dirs=True):
        '''
        Parse program arguments
        Args:
            dirs (boolean): True to make file directories for predictions and models
        '''
        args = self.parse_args()
        
        args.run_dir = args.exp_dir + '/' + args.exp_name \
            + '/ntrain{}_batch{}_blcks{}_lr{}_nic{}'.format(
                args.ntrain, args.batch_size, args.blocks, args.lr, args.nic)

        args.ckpt_dir = args.run_dir + '/checkpoints'
        args.pred_dir = args.run_dir + "/predictions"
        if dirs:
            mkdirs(args.run_dir, args.ckpt_dir, args.pred_dir)

        # Set random seed
        if args.seed is None:
            args.seed = random.randint(1, 10000)
        print("Random Seed: ", args.seed)
        random.seed(args.seed)
        torch.manual_seed(args.seed)

        print('Arguments:')
        pprint(vars(args))

        if dirs:
            with open(args.run_dir + "/args.txt", 'w') as args_file:
                json.dump(vars(args), args_file, indent=4)

        return args
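Because the arguments are dumped to args.txt as JSON, a later run can restore them. A small sketch of the inverse operation (load_args is a hypothetical helper, not part of the original code):

import json
import argparse

def load_args(run_dir):
    # Hypothetical inverse of the json.dump above: rebuild the Namespace.
    with open(run_dir + "/args.txt") as args_file:
        return argparse.Namespace(**json.load(args_file))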
Example #6
    def parse(self):
        if not self.initialized:
            self.initialize()
        opt = self.parser.parse_args()

        # set gpu
        torch.cuda.set_device(opt.gpu)

        args = vars(opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        if opt.suffix:
            suffix = '_' + opt.suffix.format(**vars(opt))
            opt.name = opt.name + suffix
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')

        self.opt = opt
        return self.opt
Example #7
def test(args):
    """
    Test image of a given directory. Calculate the quantitative result if ground truth dir is provided.
    """
    Gnet = SketchNet(in_channels=3, out_channels=1, norm_type=args.Gnorm)
    gpu_ids = [int(x) for x in args.gpus.split(',')]
    if len(gpu_ids) > 0:
        Gnet.cuda()
        Gnet = nn.DataParallel(Gnet, device_ids=gpu_ids)
    Gnet.load_state_dict(torch.load(args.test_weight_path))
    Gnet.eval()

    utils.mkdirs(args.result_dir)
    for img_name in os.listdir(args.test_dir):
        test_img_path = os.path.join(args.test_dir, img_name)
        test_img = img_process.read_img_var(test_img_path, size=(256, 256))
        face_pred = Gnet(test_img)

        sketch_save_path = os.path.join(args.result_dir, img_name)
        img_process.save_var_img(face_pred, sketch_save_path, (250, 200))
        print('Save sketch in', sketch_save_path)

    if args.test_gt_dir != 'none':
        print('------------ Calculating average SSIM (this may take a while) -----------')
        avg_ssim = avg_score(args.result_dir,
                             args.test_gt_dir,
                             metric_name='ssim',
                             smooth=False,
                             verbose=True)
        print('------------ Calculating smoothed average SSIM (this may take a while) -----------')
        avg_ssim_smoothed = avg_score(args.result_dir,
                                      args.test_gt_dir,
                                      metric_name='ssim',
                                      smooth=True,
                                      verbose=True)
        print('------------ Calculating average FSIM (this may take a while) -----------')
        avg_fsim = avg_score(args.result_dir,
                             args.test_gt_dir,
                             metric_name='fsim',
                             smooth=False,
                             verbose=True)
        print('------------ Calculating smoothed average FSIM (this may take a while) -----------')
        avg_fsim_smoothed = avg_score(args.result_dir,
                                      args.test_gt_dir,
                                      metric_name='fsim',
                                      smooth=True,
                                      verbose=True)
        print('Average SSIM: {}'.format(avg_ssim))
        print('Average SSIM (Smoothed): {}'.format(avg_ssim_smoothed))
        print('Average FSIM: {}'.format(avg_fsim))
        print('Average FSIM (Smoothed): {}'.format(avg_fsim_smoothed))
Example #8
    def print_options(self, opt):
        """print options and open save folder for saving options
           It will be saved in save_dir/model_name/[mode]opt.txt"""
        message = '----------------------Arguments-------------------------\n'
        for k, v in vars(opt).items():
            message += f'{k:>25}: {v:<30}\n'
        message += '---------------------End--------------------------------\n'
        print(message)

        # saving options
        result_dir = os.path.join(opt.save_dir, opt.model)
        utils.mkdirs(result_dir)
        opt_file_name = os.path.join(result_dir, f'{opt.mode}opt.txt')
        with open(opt_file_name, 'wt') as f:
            f.write(message)
Example #9
def _process_vocab(args, questions) -> Dict:
    """If input_vocab_json is provided, then use (or expand) it, o.w. build vocab from train files"""
    # Either create the vocab or load it from disk
    if args.input_vocab_json == '' or args.expand_vocab == 1:
        logger.info('Building vocab')
        answer_token_to_idx = {}  # stays empty when the questions carry no answers
        if 'answer' in questions[0]:
            answer_token_to_idx = preprocess_utils.build_vocab(
                (q['answer'] for q in questions))
        question_token_to_idx = preprocess_utils.build_vocab(
            (q['question'] for q in questions),
            min_token_count=args.unk_threshold,
            punct_to_keep=[';', ','],
            punct_to_remove=['?', '.'])
        all_program_strs = []
        for q in questions:
            if 'program' not in q: continue
            program_str = program_to_str(q['program'], args.mode)
            if program_str is not None:
                all_program_strs.append(program_str)
        program_token_to_idx = preprocess_utils.build_vocab(all_program_strs)
        vocab = {
            'question_token_to_idx': question_token_to_idx,
            'program_token_to_idx': program_token_to_idx,
            'answer_token_to_idx': answer_token_to_idx,
        }

    if args.input_vocab_json != '':
        logger.info('Loading vocab')
        if args.expand_vocab == 1:
            new_vocab = vocab
        with open(args.input_vocab_json) as f:
            vocab = json.load(f)
        if args.expand_vocab == 1:
            num_new_words = 0
            for word in new_vocab['question_token_to_idx']:
                if word not in vocab['question_token_to_idx']:
                    logger.info('Found new word %s' % word)
                    idx = len(vocab['question_token_to_idx'])
                    vocab['question_token_to_idx'][word] = idx
                    num_new_words += 1
            logger.info('Found %d new words' % num_new_words)

    if args.output_vocab_json != '':
        utils.mkdirs(os.path.dirname(args.output_vocab_json))
        with open(args.output_vocab_json, 'w') as f:
            json.dump(vocab, f)

    return vocab
Example #10
    def save_networks(self):
        utils.mkdirs(self.save_dir)
        save_encoder_filename = f'{self.model_name}_e.pth'
        save_decoder_filename = f'{self.model_name}_d.pth'
        save_encoder_path = os.path.join(self.save_dir, save_encoder_filename)
        save_decoder_path = os.path.join(self.save_dir, save_decoder_filename)
        net_d = self.decoder
        net_e = self.encoder

        if len(self.gpu) > 0 and torch.cuda.is_available():
            torch.save(net_d.module.cpu().state_dict(), save_decoder_path)
            net_d.cuda(self.gpu[0])
            torch.save(net_e.module.cpu().state_dict(), save_encoder_path)
            net_e.cuda(self.gpu[0])
        else:
            torch.save(net_d.cpu().state_dict(), save_decoder_path)
            torch.save(net_e.cpu().state_dict(), save_encoder_path)
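Saving the unwrapped .module weights keeps the checkpoints loadable without DataParallel. A hedged sketch of the matching load step (Encoder and Decoder stand in for whatever classes the project actually defines):

import torch

# Hypothetical loading counterpart; Encoder/Decoder are placeholders.
net_e, net_d = Encoder(), Decoder()
net_e.load_state_dict(torch.load(save_encoder_path, map_location="cpu"))
net_d.load_state_dict(torch.load(save_decoder_path, map_location="cpu"))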
Example #11
    def __init__(self, opt):
        # self.opt = opt
        self.display_id = opt.display_id
        self.use_html = opt.is_train and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.model
        self.opt = opt
        self.saved = False
        if self.display_id > 0:
            import visdom
            self.vis = visdom.Visdom(port=opt.display_port)

        if self.use_html:
            self.web_dir = os.path.join(opt.ckpt_dir, opt.model, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            utils.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.ckpt_dir, opt.model, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)
Example #12
    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values (if different).
        It will save options into a text file / [checkpoints_dir] / opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        opt.expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        utils.mkdirs(opt.expr_dir)
        file_name = os.path.join(opt.expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

        opt.log_dir = os.path.join(opt.checkpoints_dir, 'log_dir')
        utils.mkdirs(opt.log_dir)
        opt.log_archive = os.path.join(opt.checkpoints_dir, 'log_archive')
        utils.mkdirs(opt.log_archive)
Example #14
    def parse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.mode = self.mode

        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        utils.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
Example #15
    def __init__(self, opt, save_dir, filename='loss_log.txt'):
        self.display_id = opt.display_id
        self.use_html = not opt.no_html
        self.win_size = opt.display_winsize
        self.save_epoch_freq = opt.save_epoch_freq
        # if the save-epoch-frequency option is negative, volumes will not be saved
        self.save_to_disk = opt.save_epoch_freq >= 0
        self.save_dir = save_dir
        self.name = os.path.basename(self.save_dir)
        self.saved = False
        self.display_single_pane_ncols = opt.display_single_pane_ncols

        # Error plots
        self.error_plots = dict()
        self.error_wins = dict()

        if self.display_id > 0:
            import visdom
            self.vis = visdom.Visdom(port=opt.display_port)

        if self.save_to_disk:
            self.saved_volumes_dir = os.path.join(self.save_dir, 'saved_volumes')
            utils.mkdir(self.saved_volumes_dir)

        if self.use_html:
            self.web_dir = os.path.join(self.save_dir, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            utils.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(self.save_dir, filename)
        self.log_table = os.path.join(self.save_dir, filename.split('.')[0] + '.xlsx')
        if os.path.exists(self.log_table):
            timestamp = str(time.time()).split('.')[0]
            self.log_table = os.path.splitext(self.log_table)[0] + '_' + timestamp + '.xlsx'

        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)
Example #16
    def parse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.mode = self.mode

        # process the options here (gpu ids, etc.)
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        expr_dir = os.path.join('.output', self.opt.test_name)
        utils.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
Example #17
    def parse(self):
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()

        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # avoid shadowing the built-in id()
            if gpu_id >= 0:
                self.opt.gpu_ids.append(gpu_id)

        if self.opt.mtlalpha == 1.0:
            self.opt.mtl_mode = 'ctc'
        elif self.opt.mtlalpha == 0.0:
            self.opt.mtl_mode = 'att'
        else:
            self.opt.mtl_mode = 'mtl'

        args = vars(self.opt)

        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')

        # save to the disk
        exp_path = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        utils.mkdirs(exp_path)
        self.opt.exp_path = exp_path
        file_name = os.path.join(exp_path, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
Example #18
def test():
    """
    Test(Zooming SloMo) - inference on set of input data or Vid4 data
    """
    # set context and load the model
    ctx = get_extension_context(args.context)
    nn.set_default_context(ctx)
    nn.load_parameters(args.model)
    input_dir = args.input_dir
    n_ot = 7

    # list all input sequence folders containing input frames
    inp_dir_list = sorted(glob.glob(input_dir + '/*'))
    inp_dir_name_list = []
    avg_psnr_l = []
    avg_psnr_y_l = []
    avg_ssim_y_l = []
    sub_folder_name_l = []
    save_folder = 'results'
    # for each sub-folder
    for inp_dir in inp_dir_list:
        gt_tested_list = []
        inp_dir_name = inp_dir.split('/')[-1]
        sub_folder_name_l.append(inp_dir_name)

        inp_dir_name_list.append(inp_dir_name)
        save_inp_folder = osp.join(save_folder, inp_dir_name)
        img_low_res_list = sorted(glob.glob(inp_dir + '/*'))

        util.mkdirs(save_inp_folder)
        imgs = util.read_seq_imgs_(inp_dir)

        img_gt_l = []
        if args.metrics:
            replace_str = 'LR'
            for img_gt_path in sorted(glob.glob(osp.join(inp_dir.replace(replace_str, 'HR'), '*'))):
                img_gt_l.append(util.read_image(img_gt_path))

        avg_psnr, avg_psnr_sum, cal_n = 0, 0, 0
        avg_psnr_y, avg_psnr_sum_y = 0, 0
        avg_ssim_y, avg_ssim_sum_y = 0, 0

        skip = args.metrics

        select_idx_list = util.test_index_generation(
            skip, n_ot, len(img_low_res_list))

        # process each image
        for select_idxs in select_idx_list:
            # get input images
            select_idx = [select_idxs[0]]
            gt_idx = select_idxs[1]
            imgs_in = F.gather_nd(
                imgs, indices=nn.Variable.from_numpy_array(select_idx))
            imgs_in = F.reshape(x=imgs_in, shape=(1,) + imgs_in.shape)
            output = zooming_slo_mo_network(imgs_in, args.only_slomo)
            outputs = output[0]
            outputs.forward(clear_buffer=True)

            for idx, name_idx in enumerate(gt_idx):
                if name_idx in gt_tested_list:
                    continue
                gt_tested_list.append(name_idx)
                output_f = outputs.d[idx, :, :, :]
                output = util.tensor2img(output_f)
                cv2.imwrite(osp.join(save_inp_folder,
                                     '{:08d}.png'.format(name_idx + 1)), output)
                print("Saving :", osp.join(save_inp_folder,
                                           '{:08d}.png'.format(name_idx + 1)))

                if args.metrics:
                    # calculate PSNR
                    output = output / 255.
                    ground_truth = np.copy(img_gt_l[name_idx])
                    cropped_output = output
                    cropped_gt = ground_truth

                    crt_psnr = util.calculate_psnr(
                        cropped_output * 255, cropped_gt * 255)
                    cropped_gt_y = util.bgr2ycbcr(cropped_gt, only_y=True)
                    cropped_output_y = util.bgr2ycbcr(
                        cropped_output, only_y=True)
                    crt_psnr_y = util.calculate_psnr(
                        cropped_output_y * 255, cropped_gt_y * 255)
                    crt_ssim_y = util.calculate_ssim(
                        cropped_output_y * 255, cropped_gt_y * 255)

                    avg_psnr_sum += crt_psnr
                    avg_psnr_sum_y += crt_psnr_y
                    avg_ssim_sum_y += crt_ssim_y
                    cal_n += 1

        if args.metrics:
            avg_psnr = avg_psnr_sum / cal_n
            avg_psnr_y = avg_psnr_sum_y / cal_n
            avg_ssim_y = avg_ssim_sum_y / cal_n

            avg_psnr_l.append(avg_psnr)
            avg_psnr_y_l.append(avg_psnr_y)
            avg_ssim_y_l.append(avg_ssim_y)

    if args.metrics:
        print('################ Tidy Outputs ################')
        for name, ssim, psnr_y in zip(sub_folder_name_l, avg_ssim_y_l, avg_psnr_y_l):
            print(
                'Folder {} - Average SSIM: {:.6f}  PSNR-Y: {:.6f} dB. '.format(name, ssim, psnr_y))
        print('################ Final Results ################')
        print('Total Average SSIM: {:.6f}  PSNR-Y: {:.6f} dB for {} clips. '.format(
            sum(avg_ssim_y_l) / len(avg_ssim_y_l), sum(avg_psnr_y_l) /
            len(avg_psnr_y_l),
            len(inp_dir_list)))
Example #19
    return u_out, u_target

if __name__ == '__main__':

    # Parse arguments
    args = Parser().parse()
    use_cuda = "cpu"
    if(torch.cuda.is_available()):
        use_cuda = "cuda"
    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Torch device:{}".format(args.device))
    
    args.train_dir = args.run_dir + "/training"
    args.pred_dir = args.train_dir + "/predictions"
    mkdirs(args.train_dir, args.pred_dir)

    # Domain settings, matches solver settings
    x0 = 0
    x1 = 22 * np.pi
    args.dx = (x1 - x0)/args.nel

    # Create training/testing loaders
    ksLoader = KSLoader()
    training_loader = ksLoader.createTrainingLoaderInitial(args.ntrain, x0, x1, args.nel, args.batch_size)
    
    test_cases = np.arange(1,5+1e-8,1).astype(int) # Some validation/test data
    testing_loader = ksLoader.createTestingLoader(args.data_dir, test_cases, batch_size=args.test_batch_size)

    # Create DenseED model
    denseED = DenseED(in_channels=args.nic, out_channels=args.noc)
    # (the remaining DenseED constructor arguments are truncated in the original listing)
Example #20
    if args.activation_type == "relu":
        activations = [ReLU] * args.nb_hidden_layers
    elif args.activation_type == "sigmoid":
        activations = [Sigmoid] * args.nb_hidden_layers

    if args.hidden_type == "traditional":
        hidden_layer_types = [HiddenLayer] * args.nb_hidden_layers
    elif args.hidden_type == "residual":
        hidden_layer_types = [HiddenLayer] * (args.nb_hidden_layers - args.skip) \
            + [ResidualHiddenLayer] * args.skip

    # P(\pi) PARAMS
    prior_alpha = 1.
    prior_beta = args.alpha0

    # DATA PARAMS
    # Create datasets and experiments folders if needed.
    dataset_dir = mkdirs("./datasets")
    mkdirs(args.experiment_dir)

    dataset_name = None
    
    if args.dataset == 'mnist_plus_rot' or args.dataset == 'svhn_pca':
        dataset = pjoin(dataset_dir, args.dataset + ".pkl")
    else:
        dataset = pjoin(dataset_dir, args.dataset + ".npz")


    print "Datasets dir: {}".format(os.path.abspath(dataset_dir))
    print "Experiment dir: {}".format(os.path.abspath(args.experiment_dir))

    train_and_eval_stickBreaking_ss_dgm(
        dataset=dataset,
Example #21
def main(args):
    """
    Save nx.graph (Gss, Gts,...) and corresponding torch_geometric.data.PairData
    (via clevr_parse embedder api).
    """
    if (args.input_vocab_json == '') and (args.output_vocab_json == ''):
        logger.info(
            'Must give one of --input_vocab_json or --output_vocab_json')
        return
    graph_parser = clevr_parser.Parser(
        backend='spacy',
        model=args.parser_lm,
        has_spatial=True,
        has_matching=True).get_backend(identifier='spacy')
    embedder = clevr_parser.Embedder(
        backend='torch', parser=graph_parser).get_backend(identifier='torch')
    is_directed_graph = args.is_directed_graph  # Parse graphs as nx.MultiDiGraph

    out_dir, out_f_prefix = _get_out_dir_and_file_prefix(args)
    checkpoint_dir = f"{out_dir}/checkpoints"
    utils.mkdirs(checkpoint_dir)

    questions, img_scenes = get_questions_and_parsed_scenes(
        args.input_questions_json, args.input_parsed_img_scenes_json)
    if args.is_debug:
        set_default_level(10)
        questions = questions[:128]  # default BSZ is 64, ensuring enough for batch iteration
        logger.debug(
            f"In DEBUG mode, sampling {len(questions)} questions only..")
    # Process Vocab #
    vocab = _process_vocab(args, questions)

    # Encode all questions and programs
    logger.info('Encoding data')
    questions_encoded, programs_encoded, answers, image_idxs = [], [], [], []
    question_families = []
    orig_idxs = []

    # Graphs and Embeddings #
    data_s_list = []  # List [torch_geometric.data.Data]
    data_t_list = []  # List [torch_geometric.data.Data]
    num_samples = 0  # Counter for keeping track of processed samples
    num_skipped = 0  # Counter for tracking num of samples skipped
    for orig_idx, q in enumerate(questions):
        # First see if Gss, Gts can be extracted.
        # If not (e.g., some edge cases like plurality), skip the data sample
        img_idx = q['image_index']
        img_fn = q['image_filename']
        logger.debug(f"\tProcessing Image - {img_idx}: {img_fn} ...")
        # q_idx = q['question_index']
        # q_fam_idx = q['question_family_index']
        ## 1: Ensure both Gs, Gt are parseable for this question sample, otherwise skip
        img_scene = list(
            filter(lambda x: x['image_index'] == img_idx, img_scenes))[0]
        try:
            Gt, t_doc = graph_parser.get_doc_from_img_scene(
                img_scene, is_directed_graph=is_directed_graph)
            X_t, ei_t, e_attr_t = embedder.embed_t(
                img_idx, args.input_parsed_img_scenes_json)
        except AssertionError as ae:
            logger.warning(f"AssertionError Encountered: {ae}")
            logger.warning(f"[{img_fn}] Excluding images with > 10 objects")
            num_skipped += 1
            continue
        if Gt is None and ("SKIP" in t_doc):
            # If the derendering pipeline failed, then just skip the
            # scene, don't process the labels (and text_scenes) for the image
            print(f"Got None img_doc at image_index: {img_idx}")
            print(f"Skipping all text_scenes for imgage idx: {img_idx}")
            num_skipped += 1
            continue
        s = q['question']
        orig_idx = q['question_index']
        try:
            Gs, s_doc = graph_parser.parse(s,
                                           return_doc=True,
                                           is_directed_graph=is_directed_graph)
            X_s, ei_s, e_attr_s = embedder.embed_s(s)
        except ValueError as ve:
            logger.warning(f"ValueError Encountered: {ve}")
            logger.warning(f"Skipping question: {s} for {img_fn}")
            num_skipped += 1
            continue
        if Gs is None and ("SKIP" in s_doc):
            logger.warning(
                "Got None as Gs and 'SKIP' in Gs_embd. (likely plural with CLEVR_OBJS label) "
            )
            logger.warning(
                f"SKIPPING processing {s} for {img_fn} and at {img_idx}")
            num_skipped += 1
            continue

        # Using ClevrData allows us a debug extension to Data
        data_s = ClevrData(x=X_s, edge_index=ei_s, edge_attr=e_attr_s)
        data_t = ClevrData(x=X_t, edge_index=ei_t, edge_attr=e_attr_t)
        data_s_list.append(data_s)
        data_t_list.append(data_t)

        question = q['question']
        orig_idxs.append(orig_idx)
        image_idxs.append(img_idx)
        if 'question_family_index' in q:
            question_families.append(q['question_family_index'])
        question_tokens = preprocess_utils.tokenize(question,
                                                    punct_to_keep=[';', ','],
                                                    punct_to_remove=['?', '.'])
        question_encoded = preprocess_utils.encode(
            question_tokens,
            vocab['question_token_to_idx'],
            allow_unk=args.encode_unk == 1)
        questions_encoded.append(question_encoded)

        has_prog_seq = 'program' in q
        if has_prog_seq:
            program = q['program']
            program_str = program_to_str(program, args.mode)
            program_tokens = preprocess_utils.tokenize(program_str)
            program_encoded = preprocess_utils.encode(
                program_tokens, vocab['program_token_to_idx'])
            programs_encoded.append(program_encoded)

        if 'answer' in q:
            ans = q['answer']
            answers.append(vocab['answer_token_to_idx'][ans])

        num_samples += 1
        logger.info("-" * 50)
        logger.info(f"Samples processed count = {num_samples}")
        if has_prog_seq:
            logger.info(f"\n[{orig_idx}]: question: {question} \n"
                        f"\tprog_str: {program_str} \n"
                        f"\tanswer: {ans}")
        logger.info("-" * 50)

        # ---- CHECKPOINT ---- #
        if num_samples % args.checkpoint_every == 0:
            logger.info(f"Checkpointing at {num_samples}")
            checkpoint_fn_prefix = f"{out_f_prefix}_{num_samples}"
            _out_dir = f"{checkpoint_dir}/{out_f_prefix}_{num_samples}"
            utils.mkdirs(_out_dir)
            out_fpp = f"{_out_dir}/{checkpoint_fn_prefix}"
            # ------------ Checkpoint .H5 ------------#
            logger.info(
                f"CHECKPOINT: Saving checkpoint files at directory: {out_fpp}")
            save_h5(f"{out_fpp}.h5", vocab, questions_encoded, image_idxs,
                    orig_idxs, programs_encoded, question_families, answers)
            # ------------ Checkpoint GRAPH DATA ------------#
            save_graph_pairdata(out_fpp,
                                data_s_list,
                                data_t_list,
                                is_directed_graph=is_directed_graph)
            logger.info(f"-------------- CHECKPOINT: COMPLETED --------")

        if (args.max_sample > 0) and (num_samples >= args.max_sample):
            logger.info(f"len(questions_encoded = {len(questions_encoded)}")
            logger.info("args.max_sample reached: Completing ... ")
            break

    logger.debug(f"Total samples skipped = {num_skipped}")
    logger.debug(f"Total samples processed = {num_samples}")
    out_fpp = f"{out_dir}/{out_f_prefix}"
    ## SAVE .H5: Baseline {dataset}_h5.h5 file (q,p,ans,img_idx) as usual
    logger.info(f"Saving baseline (processed) data in: {out_fpp}.h5")
    save_h5(f"{out_fpp}.h5", vocab, questions_encoded, image_idxs, orig_idxs,
            programs_encoded, question_families, answers)
    ## ------------  SAVE GRAPH DATA ------------ ##
    ## N.b. Ensure the lengths of these lists are all equal
    save_graph_pairdata(out_fpp,
                        data_s_list,
                        data_t_list,
                        is_directed_graph=is_directed_graph)
    logger.info(f"Saved Graph Data in: {out_fpp}_*.[h5|.gpickle|.npz|.pt] ")
Example #22
        # (snippet begins mid-branch; the "if" arm selecting the sparse loader is truncated)
        lr_loader = LRSparseDataLoader()
    else:
        dataset = CAIDataset()
        lr_loader = LRDataLoader()

    dataset.initialize(opt)
    lr_loader.initialize(dataset=dataset, opt=opt)

    for c in [1.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 300.0, 500.0]:
        setattr(opt, 'propensity', 'min')
        setattr(opt, 'clip_value', c)
        setattr(opt, 'save_epoch_freq', 1)
        setattr(opt, 'name', 'clip_' + str(int(c)))
        print(opt)
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)

        logfile = open(os.path.join(expr_dir, "log.txt"), 'a+')
        sys.stdout = Logger(init_stdout, logfile)

        model = create_model(opt)
        total_steps = 0

        for epoch in range(opt.epoch):
            epoch_iter = 0
            t_data = 0.0
            epoch_start_time = time.time()
            iter_start_time = time.time()

            for i, data in enumerate(lr_loader):
                # print('[' + str(epoch) + "][" + str(epoch_iter) + ']')
Example #23
def save_visualization(vis_rows, outputs, batch_data, save_dir, opt):
    # fetch data and predictions
    mag_mix = batch_data['audio_mix_mags']
    phase_mix = batch_data['audio_mix_phases']
    visuals = batch_data['visuals']

    pred_masks_ = outputs['pred_mask']
    gt_masks_ = outputs['gt_mask']
    mag_mix_ = outputs['audio_mix_mags']
    weight_ = outputs['weight']
    visual_object = outputs['visual_object']
    gt_label = outputs['gt_label']
    _, pred_label = torch.max(outputs['pred_label'], 1)
    label_list = ['Banjo', 'Cello', 'Drum', 'Guitar', 'Harp', 'Harmonica', 'Oboe', 'Piano', 'Saxophone', \
                    'Trombone', 'Trumpet', 'Violin', 'Flute','Accordion', 'Horn']

    # unwarp log scale
    B = mag_mix.size(0)
    if opt.log_freq:
        grid_unwarp = torch.from_numpy(
            utils.warpgrid(B,
                           opt.stft_frame // 2 + 1,
                           gt_masks_.size(3),
                           warp=False)).to(opt.device)
        pred_masks_linear = F.grid_sample(pred_masks_, grid_unwarp)
        gt_masks_linear = F.grid_sample(gt_masks_, grid_unwarp)
    else:
        pred_masks_linear = pred_masks_
        gt_masks_linear = gt_masks_

    # convert into numpy
    mag_mix = mag_mix.numpy()
    mag_mix_ = mag_mix_.detach().cpu().numpy()
    phase_mix = phase_mix.numpy()
    weight_ = weight_.detach().cpu().numpy()
    pred_masks_ = pred_masks_.detach().cpu().numpy()
    pred_masks_linear = pred_masks_linear.detach().cpu().numpy()
    gt_masks_ = gt_masks_.detach().cpu().numpy()
    gt_masks_linear = gt_masks_linear.detach().cpu().numpy()
    visual_object = visual_object.detach().cpu().numpy()
    gt_label = gt_label.detach().cpu().numpy()
    pred_label = pred_label.detach().cpu().numpy()

    # loop over each example
    for j in range(min(B, opt.num_visualization_examples)):
        row_elements = []

        # video names
        prefix = str(j) + '-' + label_list[int(
            gt_label[j])] + '-' + label_list[int(pred_label[j])]
        utils.mkdirs(os.path.join(save_dir, prefix))

        # save mixture
        mix_wav = utils.istft_coseparation(mag_mix[j, 0],
                                           phase_mix[j, 0],
                                           hop_length=opt.stft_hop)
        mix_amp = utils.magnitude2heatmap(mag_mix_[j, 0])
        weight = utils.magnitude2heatmap(weight_[j, 0], log=False, scale=100.)
        filename_mixwav = os.path.join(prefix, 'mix.wav')
        filename_mixmag = os.path.join(prefix, 'mix.jpg')
        filename_weight = os.path.join(prefix, 'weight.jpg')
        imsave(os.path.join(save_dir, filename_mixmag), mix_amp[::-1, :, :])
        imsave(os.path.join(save_dir, filename_weight), weight[::-1, :])
        wavfile.write(os.path.join(save_dir, filename_mixwav),
                      opt.audio_sampling_rate, mix_wav)
        row_elements += [{
            'text': prefix
        }, {
            'image': filename_mixmag,
            'audio': filename_mixwav
        }]

        # GT and predicted audio reconstruction
        gt_mag = mag_mix[j, 0] * gt_masks_linear[j, 0]
        gt_wav = utils.istft_coseparation(gt_mag,
                                          phase_mix[j, 0],
                                          hop_length=opt.stft_hop)
        pred_mag = mag_mix[j, 0] * pred_masks_linear[j, 0]
        preds_wav = utils.istft_coseparation(pred_mag,
                                             phase_mix[j, 0],
                                             hop_length=opt.stft_hop)

        # output masks
        filename_gtmask = os.path.join(prefix, 'gtmask.jpg')
        filename_predmask = os.path.join(prefix, 'predmask.jpg')
        gt_mask = (np.clip(gt_masks_[j, 0], 0, 1) * 255).astype(np.uint8)
        pred_mask = (np.clip(pred_masks_[j, 0], 0, 1) * 255).astype(np.uint8)
        imsave(os.path.join(save_dir, filename_gtmask), gt_mask[::-1, :])
        imsave(os.path.join(save_dir, filename_predmask), pred_mask[::-1, :])

        # output spectrogram (log of magnitude, show colormap)
        filename_gtmag = os.path.join(prefix, 'gtamp.jpg')
        filename_predmag = os.path.join(prefix, 'predamp.jpg')
        gt_mag = utils.magnitude2heatmap(gt_mag)
        pred_mag = utils.magnitude2heatmap(pred_mag)
        imsave(os.path.join(save_dir, filename_gtmag), gt_mag[::-1, :, :])
        imsave(os.path.join(save_dir, filename_predmag), pred_mag[::-1, :, :])

        # output audio
        filename_gtwav = os.path.join(prefix, 'gt.wav')
        filename_predwav = os.path.join(prefix, 'pred.wav')
        wavfile.write(os.path.join(save_dir, filename_gtwav),
                      opt.audio_sampling_rate, gt_wav)
        wavfile.write(os.path.join(save_dir, filename_predwav),
                      opt.audio_sampling_rate, preds_wav)

        row_elements += [{
            'image': filename_predmag,
            'audio': filename_predwav
        }, {
            'image': filename_gtmag,
            'audio': filename_gtwav
        }, {
            'image': filename_predmask
        }, {
            'image': filename_gtmask
        }]

        row_elements += [{'image': filename_weight}]
        vis_rows.append(row_elements)
Example #24
def main(args):
    if (args.input_vocab_json == '') and (args.output_vocab_json == ''):
        print('Must give one of --input_vocab_json or --output_vocab_json')
        return

    print('Loading data')
    with open(args.input_questions_json, 'r') as f:
        questions = json.load(f)['questions']

    # Either create the vocab or load it from disk
    if args.input_vocab_json == '' or args.expand_vocab == 1:
        print('Building vocab')
        answer_token_to_idx = {}  # stays empty when the questions carry no answers
        if 'answer' in questions[0]:
            answer_token_to_idx = preprocess_utils.build_vocab(
                (q['answer'] for q in questions))
        question_token_to_idx = preprocess_utils.build_vocab(
            (q['question'] for q in questions),
            min_token_count=args.unk_threshold,
            punct_to_keep=[';', ','],
            punct_to_remove=['?', '.'])
        all_program_strs = []
        for q in questions:
            if 'program' not in q: continue
            program_str = program_to_str(q['program'], args.mode)
            if program_str is not None:
                all_program_strs.append(program_str)
        program_token_to_idx = preprocess_utils.build_vocab(all_program_strs)
        vocab = {
            'question_token_to_idx': question_token_to_idx,
            'program_token_to_idx': program_token_to_idx,
            'answer_token_to_idx': answer_token_to_idx,
        }

    if args.input_vocab_json != '':
        print('Loading vocab')
        if args.expand_vocab == 1:
            new_vocab = vocab
        with open(args.input_vocab_json, 'r') as f:
            vocab = json.load(f)
        if args.expand_vocab == 1:
            num_new_words = 0
            for word in new_vocab['question_token_to_idx']:
                if word not in vocab['question_token_to_idx']:
                    print('Found new word %s' % word)
                    idx = len(vocab['question_token_to_idx'])
                    vocab['question_token_to_idx'][word] = idx
                    num_new_words += 1
            print('Found %d new words' % num_new_words)

    if args.output_vocab_json != '':
        utils.mkdirs(os.path.dirname(args.output_vocab_json))
        with open(args.output_vocab_json, 'w') as f:
            json.dump(vocab, f)

    # Encode all questions and programs
    print('Encoding data')
    questions_encoded = []
    programs_encoded = []
    question_families = []
    orig_idxs = []
    image_idxs = []
    answers = []
    for orig_idx, q in enumerate(questions):
        question = q['question']

        orig_idxs.append(orig_idx)
        image_idxs.append(q['image_index'])
        if 'question_family_index' in q:
            question_families.append(q['question_family_index'])
        question_tokens = preprocess_utils.tokenize(question,
                                                    punct_to_keep=[';', ','],
                                                    punct_to_remove=['?', '.'])
        question_encoded = preprocess_utils.encode(
            question_tokens,
            vocab['question_token_to_idx'],
            allow_unk=args.encode_unk == 1)
        questions_encoded.append(question_encoded)

        if 'program' in q:
            program = q['program']
            program_str = program_to_str(program, args.mode)
            program_tokens = preprocess_utils.tokenize(program_str)
            program_encoded = preprocess_utils.encode(
                program_tokens, vocab['program_token_to_idx'])
            programs_encoded.append(program_encoded)

        if 'answer' in q:
            answers.append(vocab['answer_token_to_idx'][q['answer']])

    # Pad encoded questions and programs
    max_question_length = max(len(x) for x in questions_encoded)
    for qe in questions_encoded:
        while len(qe) < max_question_length:
            qe.append(vocab['question_token_to_idx']['<NULL>'])

    if len(programs_encoded) > 0:
        max_program_length = max(len(x) for x in programs_encoded)
        for pe in programs_encoded:
            while len(pe) < max_program_length:
                pe.append(vocab['program_token_to_idx']['<NULL>'])

    # Create h5 file
    print('Writing output')
    questions_encoded = np.asarray(questions_encoded, dtype=np.int32)
    programs_encoded = np.asarray(programs_encoded, dtype=np.int32)
    print(questions_encoded.shape)
    print(programs_encoded.shape)
    utils.mkdirs(os.path.dirname(args.output_h5_file))
    with h5py.File(args.output_h5_file, 'w') as f:
        f.create_dataset('questions', data=questions_encoded)
        f.create_dataset('image_idxs', data=np.asarray(image_idxs))
        f.create_dataset('orig_idxs', data=np.asarray(orig_idxs))

        if len(programs_encoded) > 0:
            f.create_dataset('programs', data=programs_encoded)
        if len(question_families) > 0:
            f.create_dataset('question_families',
                             data=np.asarray(question_families))
        if len(answers) > 0:
            f.create_dataset('answers', data=np.asarray(answers))
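The resulting HDF5 file can be read back with h5py. A minimal sketch (the file name is illustrative):

import h5py

with h5py.File('train_questions.h5', 'r') as f:  # illustrative path
    questions = f['questions'][:]    # int32 array, one padded row per question
    image_idxs = f['image_idxs'][:]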
Example #25
wordVectors = np.concatenate(
    (wordVectors[:nWords, :], wordVectors[nWords:, :]), axis=0)

visualizeWords = [
    "great", "cool", "brilliant", "wonderful", "well", "amazing", "worth",
    "sweet", "enjoyable", "boring", "bad", "dumb", "annoying", "female",
    "male", "queen", "king", "man", "woman", "rain", "snow", "hail", "coffee",
    "tea"
]

visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U, S, V = np.linalg.svd(covariance)
coord = temp.dot(U[:, 0:2])

plt.figure(figsize=(10, 6))
for i in range(len(visualizeWords)):
    plt.text(coord[i, 0],
             coord[i, 1],
             visualizeWords[i],
             bbox=dict(facecolor='green', alpha=0.1))

plt.xlim((np.min(coord[:, 0]), np.max(coord[:, 0])))
plt.ylim((np.min(coord[:, 1]), np.max(coord[:, 1])))

save_dir = os.path.join(cfg.visual_path, cfg.exp_name)
mkdirs(save_dir)
figure_file = os.path.join(save_dir, 'word_vectors.png')
plt.savefig(figure_file)
Example #26
def main():

    opt = fake_opt.Asr_train()
    exp_path = os.path.join(opt.checkpoints_dir, opt.name)
    utils.mkdirs(exp_path)
    opt.exp_path = exp_path
    #    device = torch.device("cuda:{}".format(opt.gpu_ids[0]) if len(opt.gpu_ids) > 0 and torch.cuda.is_available() else "cpu")
    device = torch.device("cuda")
    #cuda_ava = torch.cuda.is_available()
    visualizer = Visualizer(opt)
    logging = visualizer.get_logger()
    acc_report = visualizer.add_plot_report(['train/acc', 'val/acc'],
                                            'acc.png')
    loss_report = visualizer.add_plot_report(['train/loss', 'val/loss'],
                                             'loss.png')

    # data
    logging.info("Building dataset.")
    train_dataset = SequentialDataset(
        opt,
        os.path.join(opt.dataroot, 'train_new'),
        os.path.join(opt.dict_dir, 'train/vocab'),
    )
    val_dataset = SequentialDataset(
        opt,
        os.path.join(opt.dataroot, 'dev_new'),
        os.path.join(opt.dict_dir, 'train/vocab'),
    )
    train_sampler = BucketingSampler(train_dataset, batch_size=opt.batch_size)
    train_loader = SequentialDataLoader(train_dataset,
                                        num_workers=opt.num_workers,
                                        batch_sampler=train_sampler)
    val_loader = SequentialDataLoader(val_dataset,
                                      batch_size=int(opt.batch_size / 2),
                                      num_workers=opt.num_workers,
                                      shuffle=False)
    opt.idim = train_dataset.get_feat_size()  #257
    opt.odim = train_dataset.get_num_classes()  #4233
    opt.char_list = train_dataset.get_char_list()
    opt.train_dataset_len = len(train_dataset)
    logging.info('#input dims : ' + str(opt.idim))
    logging.info('#output dims: ' + str(opt.odim))
    logging.info("Dataset ready!")

    # Setup a model
    asr_model = E2E(opt)
    fbank_model = FbankModel(opt)
    lr = opt.lr
    eps = opt.eps
    iters = opt.iters
    start_epoch = opt.start_epoch
    best_loss = opt.best_loss
    best_acc = opt.best_acc
    if opt.resume:
        model_path = os.path.join(opt.works_dir, opt.resume)
        if os.path.isfile(model_path):
            package = torch.load(model_path,
                                 map_location=lambda storage, loc: storage)
            lr = package.get('lr', opt.lr)
            eps = package.get('eps', opt.eps)
            best_loss = package.get('best_loss', float('inf'))
            best_acc = package.get('best_acc', 0)
            start_epoch = int(package.get('epoch', 0))
            iters = int(package.get('iters', 0))

            acc_report = package.get('acc_report', acc_report)
            loss_report = package.get('loss_report', loss_report)
            visualizer.set_plot_report(acc_report, 'acc.png')
            visualizer.set_plot_report(loss_report, 'loss.png')

            asr_model = E2E.load_model(model_path, 'asr_state_dict')
            fbank_model = FbankModel.load_model(model_path, 'fbank_state_dict')
            logging.info('Loading model {} and iters {}'.format(
                model_path, iters))
        else:
            print("no checkpoint found at {}".format(model_path))
    asr_model.cuda()
    fbank_model.cuda()
    print(asr_model)

    # Setup an optimizer
    parameters = filter(
        lambda p: p.requires_grad,
        itertools.chain(asr_model.parameters(), fbank_model.parameters()))
    #parameters = filter(lambda p: p.requires_grad, itertools.chain(asr_model.parameters()))
    if opt.opt_type == 'adadelta':
        optimizer = torch.optim.Adadelta(parameters, rho=0.95, eps=eps)
    elif opt.opt_type == 'adam':
        optimizer = torch.optim.Adam(parameters,
                                     lr=lr,
                                     betas=(opt.beta1, 0.999))

    asr_model.train()
    fbank_model.train()
    sample_rampup = utils.ScheSampleRampup(opt.sche_samp_start_iter,
                                           opt.sche_samp_final_iter,
                                           opt.sche_samp_final_rate)
    sche_samp_rate = sample_rampup.update(iters)

    fbank_cmvn_file = os.path.join(opt.exp_path, 'fbank_cmvn.npy')
    #fbank_cmvn_file = os.path.join(opt.exp_path, 'cmvn.npy')
    if os.path.exists(fbank_cmvn_file):
        fbank_cmvn = np.load(fbank_cmvn_file)
    else:
        for i, (data) in enumerate(train_loader, start=0):
            utt_ids, spk_ids, inputs, log_inputs, targets, input_sizes, target_sizes = data
            fbank_cmvn = fbank_model.compute_cmvn(inputs, input_sizes)
            #fbank_cmvn = FbankModel.compute_cmvn(inputs, input_sizes)
            #if fbank_cmvn is not None:
            if fbank_model.cmvn_processed_num >= fbank_model.cmvn_num:
                fbank_cmvn = fbank_model.compute_cmvn(inputs, input_sizes)
                np.save(fbank_cmvn_file, fbank_cmvn)
                print('save fbank_cmvn to {}'.format(fbank_cmvn_file))
                break
    fbank_cmvn = torch.FloatTensor(fbank_cmvn)

    for epoch in range(start_epoch, opt.epochs):
        if epoch > opt.shuffle_epoch:
            print("Shuffling batches for the following epochs")
            train_sampler.shuffle(epoch)
        for i, (data) in enumerate(train_loader,
                                   start=(iters * opt.batch_size) %
                                   len(train_dataset)):
            utt_ids, spk_ids, inputs, log_inputs, targets, input_sizes, target_sizes = data
            fbank_features = fbank_model(inputs, fbank_cmvn)

            #utt_ids, spk_ids, fbank_features, targets, input_sizes, target_sizes = data
            #loss_ctc, loss_att, acc, context = asr_model(fbank_features, targets, input_sizes, target_sizes, sche_samp_rate)
            loss_ctc, loss_att, acc = asr_model(fbank_features, targets,
                                                input_sizes, target_sizes,
                                                sche_samp_rate)
            loss = opt.mtlalpha * loss_ctc + (1 - opt.mtlalpha) * loss_att
            optimizer.zero_grad()  # Clear the parameter gradients
            loss.backward()
            # compute the gradient norm to check whether it is finite
            grad_norm = torch.nn.utils.clip_grad_norm_(asr_model.parameters(),
                                                       opt.grad_clip)
            if math.isnan(grad_norm):
                logging.warning('grad norm is nan. Do not update model.')
            else:
                optimizer.step()

            iters += 1
            errors = {
                'train/loss': loss.item(),
                'train/loss_ctc': loss_ctc.item(),
                'train/acc': acc,
                'train/loss_att': loss_att.item()
            }
            visualizer.set_current_errors(errors)
            if iters % opt.print_freq == 0:
                visualizer.print_current_errors(epoch, iters)
                state = {
                    'asr_state_dict': asr_model.state_dict(),
                    'fbank_state_dict': fbank_model.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    'iters': iters,
                    'eps': opt.eps,
                    'lr': opt.lr,
                    'best_loss': best_loss,
                    'best_acc': best_acc,
                    'acc_report': acc_report,
                    'loss_report': loss_report
                }
                filename = 'latest'
                utils.save_checkpoint(state, opt.exp_path, filename=filename)

            if iters % opt.validate_freq == 0:
                sche_samp_rate = sample_rampup.update(iters)
                print("iters {} sche_samp_rate {}".format(
                    iters, sche_samp_rate))
                asr_model.eval()
                fbank_model.eval()
                torch.set_grad_enabled(False)
                num_saved_attention = 0
                for data in tqdm(val_loader):
                    utt_ids, spk_ids, inputs, log_inputs, targets, input_sizes, target_sizes = data
                    fbank_features = fbank_model(inputs, fbank_cmvn)
                    loss_ctc, loss_att, acc = asr_model(
                        fbank_features, targets, input_sizes, target_sizes,
                        0.0)

                    loss = opt.mtlalpha * loss_ctc + (1 - opt.mtlalpha) * loss_att
                    errors = {
                        'val/loss': loss.item(),
                        'val/loss_ctc': loss_ctc.item(),
                        'val/acc': acc,
                        'val/loss_att': loss_att.item()
                    }
                    visualizer.set_current_errors(errors)

                    if opt.num_save_attention > 0 and opt.mtlalpha != 1.0:
                        if num_saved_attention < opt.num_save_attention:
                            att_ws = asr_model.calculate_all_attentions(
                                fbank_features, targets, input_sizes,
                                target_sizes)
                            for x in range(len(utt_ids)):
                                att_w = att_ws[x]
                                utt_id = utt_ids[x]
                                file_name = "{}_ep{}_it{}.png".format(
                                    utt_id, epoch, iters)
                                dec_len = int(target_sizes[x])
                                enc_len = int(input_sizes[x])
                                visualizer.plot_attention(
                                    att_w, dec_len, enc_len, file_name)
                                num_saved_attention += 1
                                if num_saved_attention >= opt.num_save_attention:
                                    break
                asr_model.train()
                fbank_model.train()
                torch.set_grad_enabled(True)

                visualizer.print_epoch_errors(epoch, iters)
                acc_report = visualizer.plot_epoch_errors(
                    epoch, iters, 'acc.png')
                loss_report = visualizer.plot_epoch_errors(
                    epoch, iters, 'loss.png')
                val_loss = visualizer.get_current_errors('val/loss')
                val_acc = visualizer.get_current_errors('val/acc')
                filename = None
                if opt.criterion == 'acc' and opt.mtl_mode != 'ctc':
                    if val_acc < best_acc:
                        logging.info('val_acc {} < best_acc {}'.format(
                            val_acc, best_acc))
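                        # adadelta_eps_decay (assumed ESPnet-style helper):
                        # shrink the optimizer eps when validation degrades,
                        # damping the size of subsequent updates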
                        opt.eps = utils.adadelta_eps_decay(
                            optimizer, opt.eps_decay)
                    else:
                        filename = 'model.acc.best'
                    best_acc = max(best_acc, val_acc)
                    logging.info('best_acc {}'.format(best_acc))
                elif opt.criterion == 'loss':
                    if val_loss > best_loss:
                        logging.info('val_loss {} > best_loss {}'.format(
                            val_loss, best_loss))
                        opt.eps = utils.adadelta_eps_decay(
                            optimizer, opt.eps_decay)
                    else:
                        filename = 'model.loss.best'
                    best_loss = min(val_loss, best_loss)
                    logging.info('best_loss {}'.format(best_loss))
                state = {
                    'asr_state_dict': asr_model.state_dict(),
                    'opt': opt,
                    'epoch': epoch,
                    'iters': iters,
                    'eps': opt.eps,
                    'lr': opt.lr,
                    'best_loss': best_loss,
                    'best_acc': best_acc,
                    'acc_report': acc_report,
                    'loss_report': loss_report
                }
                utils.save_checkpoint(state, opt.exp_path, filename=filename)
                visualizer.reset()
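
For context, the fbank_cmvn array used above holds cepstral mean and variance
normalization (CMVN) statistics. A minimal, self-contained sketch of how such
statistics might be computed (all names here are hypothetical; the real
FbankModel.compute_cmvn accumulates its statistics incrementally across
batches):

import numpy as np

def compute_cmvn_stats(features, lengths):
    """Per-dimension CMVN statistics over all valid (non-padded) frames.

    features: (batch, max_frames, dim) zero-padded array
    lengths:  (batch,) number of valid frames per utterance
    Returns a (2, dim) array with row 0 = -mean and row 1 = 1/stddev,
    so that normalization is (x + cmvn[0]) * cmvn[1].
    """
    frames = np.concatenate(
        [features[i, :int(n)] for i, n in enumerate(lengths)], axis=0)
    mean = frames.mean(axis=0)
    std = frames.std(axis=0) + 1e-8  # guard against zero variance
    return np.stack([-mean, 1.0 / std], axis=0)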
Ejemplo n.º 27
0
                                      metric_name='fsim',
                                      smooth=True,
                                      verbose=True)
        print('Average SSIM: {}'.format(avg_ssim))
        print('Average SSIM (Smoothed): {}'.format(avg_ssim_smoothed))
        print('Average FSIM: {}'.format(avg_fsim))
        print('Average FSIM (Smoothed): {}'.format(avg_fsim_smoothed))


if __name__ == '__main__':
    args = cmd_option()
    gpu_ids = [int(x) for x in args.gpus.split(',')]
    # torch.cuda.set_device(gpu_ids[0])
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    args.save_weight_dir = (
        'face2sketch-norm_G{}_D{}-top{}-style_{}-flayers{}'
        '-weight-{:.1e}-{:.1e}-{:.1e}-epoch{:02d}-{}').format(
            args.Gnorm, args.Dnorm, args.topk, args.train_style,
            ''.join(map(str, args.flayers)),
            args.weight[0], args.weight[1], args.weight[2],
            args.epochs, args.other)
    args.save_weight_path = os.path.join(args.weight_root,
                                         args.save_weight_dir)

    if args.train_eval == 'train':
        print('Saving weight path', args.save_weight_path)
        utils.mkdirs(args.save_weight_path)
        train(args)
    elif args.train_eval == 'eval':
        test(args)
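
As an aside, the save_weight_dir template above encodes the main
hyperparameters into the folder name; with illustrative (hypothetical)
argument values it expands like this:

fmt = ('face2sketch-norm_G{}_D{}-top{}-style_{}-flayers{}'
       '-weight-{:.1e}-{:.1e}-{:.1e}-epoch{:02d}-{}')
print(fmt.format('IN', 'BN', 50, 'cufs', '012', 1.0, 1e-2, 1e-4, 40, 'base'))
# face2sketch-norm_GIN_DBN-top50-style_cufs-flayers012-weight-1.0e+00-1.0e-02-1.0e-04-epoch40-base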
Ejemplo n.º 28
0
        q_type = find_clevr_question_type(
            executor.vocab['program_idx_to_token'][y_np[i][1]])
        if pred_ans == gt_ans:
            stats[q_type] += 1
            stats['correct_ans'] += 1
        if check_program(pg_np[i], y_np[i]):
            stats['correct_prog'] += 1

        stats['%s_tot' % q_type] += 1
        stats['total'] += 1
    print('| %d/%d questions processed, accuracy %f' %
          (stats['total'], len(loader.dataset),
           stats['correct_ans'] / stats['total']))

result = {
    'count_acc': stats['count'] / stats['count_tot'],
    'exist_acc': stats['exist'] / stats['exist_tot'],
    'compare_num_acc': stats['compare_num'] / stats['compare_num_tot'],
    'compare_attr_acc': stats['compare_attr'] / stats['compare_attr_tot'],
    'query_acc': stats['query'] / stats['query_tot'],
    'program_acc': stats['correct_prog'] / stats['total'],
    'overall_acc': stats['correct_ans'] / stats['total']
}
print(result)

utils.mkdirs(os.path.dirname(opt.save_result_path))
with open(opt.save_result_path, 'w') as fout:
    json.dump(result, fout)
print('| result saved to %s' % opt.save_result_path)
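
The per-type counters above rely on find_clevr_question_type to map the
outermost program token to a CLEVR question family. A plausible sketch of that
mapping (module names assumed, not taken from the original source):

def find_clevr_question_type(outermost_module):
    """Map a CLEVR program's outermost module to its question type."""
    if outermost_module == 'count':
        return 'count'
    if outermost_module == 'exist':
        return 'exist'
    if outermost_module in ('equal_integer', 'greater_than', 'less_than'):
        return 'compare_num'
    if outermost_module.startswith('equal_'):
        return 'compare_attr'
    return 'query'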
Ejemplo n.º 29
0
def train():
    mkdirs(config.checkpoint_path, config.best_model_path, config.logs)
    # load data
    src1_train_dataloader_fake, src1_train_dataloader_real, \
    src2_train_dataloader_fake, src2_train_dataloader_real, \
    src3_train_dataloader_fake, src3_train_dataloader_real, \
    tgt_valid_dataloader = get_dataset(config.src1_data, config.src1_train_num_frames,
                                       config.src2_data, config.src2_train_num_frames,
                                       config.src3_data, config.src3_train_num_frames,
                                       config.tgt_data, config.tgt_test_num_frames, config.batch_size)

    best_model_ACC = 0.0
    best_model_HTER = 1.0
    best_model_ACER = 1.0
    best_model_AUC = 0.0
    # 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold
    valid_args = [np.inf, 0, 0, 0, 0, 0, 0, 0]

    loss_classifier = AverageMeter()
    classifer_top1 = AverageMeter()

    net = DG_model(config.model).to(device)
    ad_net_real = Discriminator().to(device)
    ad_net_fake = Discriminator().to(device)

    log = Logger()
    log.open(config.logs + config.tgt_data + '_log_SSDG.txt', mode='a')
    log.write(
        "\n----------------------------------------------- [START %s] %s\n\n" %
        (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '-' * 51))
    print("Norm_flag: ", config.norm_flag)
    log.write('** start training target model! **\n')
    log.write(
        '--------|------------- VALID -------------|--- classifier ---|------ Current Best ------|--------------|\n'
    )
    log.write(
        '  iter  |   loss   top-1   HTER    AUC    |   loss   top-1   |   top-1   HTER    AUC    |    time      |\n'
    )
    log.write(
        '-------------------------------------------------------------------------------------------------------|\n'
    )
    start = timer()
    criterion = {
        'softmax': nn.CrossEntropyLoss().cuda(),
        'triplet': HardTripletLoss(margin=0.1, hardest=False).cuda()
    }
    optimizer_dict = [
        {
            "params": filter(lambda p: p.requires_grad, net.parameters()),
            "lr": config.init_lr
        },
        {
            "params": filter(lambda p: p.requires_grad,
                             ad_net_real.parameters()),
            "lr": config.init_lr
        },
    ]
    optimizer = optim.SGD(optimizer_dict,
                          lr=config.init_lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    init_param_lr = []
    for param_group in optimizer.param_groups:
        init_param_lr.append(param_group["lr"])

    iter_per_epoch = 10

    src1_train_iter_real = iter(src1_train_dataloader_real)
    src1_iter_per_epoch_real = len(src1_train_iter_real)
    src2_train_iter_real = iter(src2_train_dataloader_real)
    src2_iter_per_epoch_real = len(src2_train_iter_real)
    src3_train_iter_real = iter(src3_train_dataloader_real)
    src3_iter_per_epoch_real = len(src3_train_iter_real)
    src1_train_iter_fake = iter(src1_train_dataloader_fake)
    src1_iter_per_epoch_fake = len(src1_train_iter_fake)
    src2_train_iter_fake = iter(src2_train_dataloader_fake)
    src2_iter_per_epoch_fake = len(src2_train_iter_fake)
    src3_train_iter_fake = iter(src3_train_dataloader_fake)
    src3_iter_per_epoch_fake = len(src3_train_iter_fake)

    max_iter = config.max_iter
    epoch = 1
    if (len(config.gpus) > 1):
        net = torch.nn.DataParallel(net).cuda()

    for iter_num in range(max_iter + 1):
        if (iter_num % src1_iter_per_epoch_real == 0):
            src1_train_iter_real = iter(src1_train_dataloader_real)
        if (iter_num % src2_iter_per_epoch_real == 0):
            src2_train_iter_real = iter(src2_train_dataloader_real)
        if (iter_num % src3_iter_per_epoch_real == 0):
            src3_train_iter_real = iter(src3_train_dataloader_real)
        if (iter_num % src1_iter_per_epoch_fake == 0):
            src1_train_iter_fake = iter(src1_train_dataloader_fake)
        if (iter_num % src2_iter_per_epoch_fake == 0):
            src2_train_iter_fake = iter(src2_train_dataloader_fake)
        if (iter_num % src3_iter_per_epoch_fake == 0):
            src3_train_iter_fake = iter(src3_train_dataloader_fake)
        if (iter_num != 0 and iter_num % iter_per_epoch == 0):
            epoch = epoch + 1
        param_lr_tmp = []
        for param_group in optimizer.param_groups:
            param_lr_tmp.append(param_group["lr"])

        net.train(True)
        ad_net_real.train(True)
        optimizer.zero_grad()
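        # adjust_learning_rate (assumed step schedule): scales each parameter
        # group's initial lr down once epoch passes the lr_epoch_1 and
        # lr_epoch_2 milestones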
        adjust_learning_rate(optimizer, epoch, init_param_lr,
                             config.lr_epoch_1, config.lr_epoch_2)
        ######### data prepare #########
        src1_img_real, src1_label_real = next(src1_train_iter_real)
        src1_img_real = src1_img_real.cuda()
        src1_label_real = src1_label_real.cuda()
        input1_real_shape = src1_img_real.shape[0]

        src2_img_real, src2_label_real = next(src2_train_iter_real)
        src2_img_real = src2_img_real.cuda()
        src2_label_real = src2_label_real.cuda()
        input2_real_shape = src2_img_real.shape[0]

        src3_img_real, src3_label_real = next(src3_train_iter_real)
        src3_img_real = src3_img_real.cuda()
        src3_label_real = src3_label_real.cuda()
        input3_real_shape = src3_img_real.shape[0]

        src1_img_fake, src1_label_fake = next(src1_train_iter_fake)
        src1_img_fake = src1_img_fake.cuda()
        src1_label_fake = src1_label_fake.cuda()
        input1_fake_shape = src1_img_fake.shape[0]

        src2_img_fake, src2_label_fake = next(src2_train_iter_fake)
        src2_img_fake = src2_img_fake.cuda()
        src2_label_fake = src2_label_fake.cuda()
        input2_fake_shape = src2_img_fake.shape[0]

        src3_img_fake, src3_label_fake = next(src3_train_iter_fake)
        src3_img_fake = src3_img_fake.cuda()
        src3_label_fake = src3_label_fake.cuda()
        input3_fake_shape = src3_img_fake.shape[0]

        input_data = torch.cat([
            src1_img_real, src1_img_fake, src2_img_real, src2_img_fake,
            src3_img_real, src3_img_fake
        ],
                               dim=0)

        source_label = torch.cat([
            src1_label_real, src1_label_fake, src2_label_real, src2_label_fake,
            src3_label_real, src3_label_fake
        ],
                                 dim=0)

        ######### forward #########
        classifier_label_out, feature = net(input_data, config.norm_flag)

        ######### single side adversarial learning #########
        input1_shape = input1_real_shape + input1_fake_shape
        input2_shape = input2_real_shape + input2_fake_shape
        feature_real_1 = feature.narrow(0, 0, input1_real_shape)
        feature_real_2 = feature.narrow(0, input1_shape, input2_real_shape)
        feature_real_3 = feature.narrow(0, input1_shape + input2_shape,
                                        input3_real_shape)
        feature_real = torch.cat(
            [feature_real_1, feature_real_2, feature_real_3], dim=0)
        discriminator_out_real = ad_net_real(feature_real)

        ######### unbalanced triplet loss #########
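        # single-side (unbalanced) labeling: real samples from every domain
        # share label 0 so the triplet loss pulls them together, while fake
        # samples keep per-domain labels 1/2/3 and are only pushed apart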
        real_domain_label_1 = torch.LongTensor(input1_real_shape,
                                               1).fill_(0).cuda()
        real_domain_label_2 = torch.LongTensor(input2_real_shape,
                                               1).fill_(0).cuda()
        real_domain_label_3 = torch.LongTensor(input3_real_shape,
                                               1).fill_(0).cuda()
        fake_domain_label_1 = torch.LongTensor(input1_fake_shape,
                                               1).fill_(1).cuda()
        fake_domain_label_2 = torch.LongTensor(input2_fake_shape,
                                               1).fill_(2).cuda()
        fake_domain_label_3 = torch.LongTensor(input3_fake_shape,
                                               1).fill_(3).cuda()
        source_domain_label = torch.cat([
            real_domain_label_1, fake_domain_label_1, real_domain_label_2,
            fake_domain_label_2, real_domain_label_3, fake_domain_label_3
        ],
                                        dim=0).view(-1)
        triplet = criterion["triplet"](feature, source_domain_label)

        ######### adversarial and cross-entropy losses #########
        real_shape_list = []
        real_shape_list.append(input1_real_shape)
        real_shape_list.append(input2_real_shape)
        real_shape_list.append(input3_real_shape)
        real_adloss = Real_AdLoss(discriminator_out_real, criterion["softmax"],
                                  real_shape_list)
        cls_loss = criterion["softmax"](classifier_label_out.narrow(
            0, 0, input_data.size(0)), source_label)

        ######### backward #########
        total_loss = cls_loss + config.lambda_triplet * triplet + config.lambda_adreal * real_adloss
        total_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        loss_classifier.update(cls_loss.item())
        acc = accuracy(classifier_label_out.narrow(0, 0, input_data.size(0)),
                       source_label,
                       topk=(1, ))
        classifer_top1.update(acc[0])
        print('\r', end='', flush=True)
        print(
            '  %4.1f  |  %5.3f  %6.3f  %6.3f  %6.3f  |  %6.3f  %6.3f  |  %6.3f  %6.3f  %6.3f  | %s'
            % ((iter_num + 1) / iter_per_epoch, valid_args[0], valid_args[6],
               valid_args[3] * 100, valid_args[4] * 100, loss_classifier.avg,
               classifer_top1.avg, float(best_model_ACC),
               float(best_model_HTER * 100), float(
                   best_model_AUC * 100), time_to_str(timer() - start, 'min')),
            end='',
            flush=True)

        if (iter_num != 0 and (iter_num + 1) % iter_per_epoch == 0):
            # 0:loss, 1:top-1, 2:EER, 3:HTER, 4:AUC, 5:threshold, 6:ACC_threshold
            valid_args = eval(tgt_valid_dataloader, net, config.norm_flag)
            # judge model according to HTER
            is_best = valid_args[3] <= best_model_HTER
            best_model_HTER = min(valid_args[3], best_model_HTER)
            threshold = valid_args[5]
            if is_best:
                best_model_ACC = valid_args[6]
                best_model_AUC = valid_args[4]

            save_list = [
                epoch, valid_args, best_model_HTER, best_model_ACC,
                best_model_ACER, threshold
            ]
            save_checkpoint(save_list, is_best, net, config.gpus,
                            config.checkpoint_path, config.best_model_path)
            print('\r', end='', flush=True)
            log.write(
                '  %4.1f  |  %5.3f  %6.3f  %6.3f  %6.3f  |  %6.3f  %6.3f  |  %6.3f  %6.3f  %6.3f  | %s   %s'
                %
                ((iter_num + 1) / iter_per_epoch, valid_args[0], valid_args[6],
                 valid_args[3] * 100, valid_args[4] * 100, loss_classifier.avg,
                 classifer_top1.avg, float(best_model_ACC),
                 float(best_model_HTER * 100), float(best_model_AUC * 100),
                 time_to_str(timer() - start, 'min'), param_lr_tmp[0]))
            log.write('\n')
            time.sleep(0.01)
Ejemplo n.º 30
0
def burgers2d(run, nu, ngx, ngy, dt, T, ngx_out, ngy_out, save_dir,
    save_every, save_pvd=False, save_vector=False, plot=False, order=4):
    """simulate 2D Burgers' equation
    https://www.firedrakeproject.org/demos/burgers.py.html

    Args:
        run (int): # run
        nu (float): viscosity
        ngx (int): # grid in x axis
        ngy (int):
        dt (float): time step for simulation
        T (float): simulation time from 0 to T
        ngx_out (int): output # grid in x axis
        ngy_out (int): output # grid in y axis
        save_dir (str): runs folder
        order (int): order for sampling initial U
        save_every (int): save frequency in terms of # dt
        save_pvd (bool): save the field as vtk file for paraview
        save_vector (bool): save fenics field vector for later operation
        plot (bool): plot fields
    """
    assert not (save_pvd and save_vector), 'saving both pvd and vector wastes memory'
    save_dir = save_dir + f'/run{run}'
    mkdirs(save_dir)
    mesh = df.UnitSquareMesh(ngx-1, ngy-1)
    mesh_out = df.UnitSquareMesh(ngx_out-1, ngy_out-1)
    V = df.VectorFunctionSpace(mesh, 'CG', 2, constrained_domain=PeriodicBoundary())
    Vout = df.VectorFunctionSpace(mesh_out, 'CG', 1, constrained_domain=PeriodicBoundary())

    # initial vector field
    u0, lam, c = init_field_fenics(mesh, V, order=order, seed=run)
    np.savez(save_dir + '/init_lam_c.npz', lam=lam, c=c)

    u = df.Function(V)
    u_old = df.Function(V)
    v = df.TestFunction(V)

    u = df.project(u0, V)
    u_old.assign(u)

    # backward Euler
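    # weak form of u_t + (u . grad)u = nu * laplace(u):
    #   ((u - u_old)/dt, v) + ((u . grad)u, v) + nu * (grad u, grad v) = 0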
    F = (df.inner((u - u_old)/dt, v) \
        + df.inner(df.dot(u, df.nabla_grad(u)), v) \
        + nu*df.inner(df.grad(u), df.grad(v)))*df.dx

    t = 0
    k = 0
    vtkfile = df.File(save_dir + f'/soln{ngx_out}x{ngy_out}_.pvd')
    u_out = df.project(u, Vout)
    u_out.rename('u', 'u')
    # vertex values reshaped to (2, ngx_out, ngy_out); the x/y axis order
    # depends on the mesh vertex numbering
    u_out_vertex = u_out.compute_vertex_values(mesh_out).reshape(2, ngx_out, ngy_out)
    np.save(save_dir + f'/u{k}.npy', u_out_vertex)
    # if plot:
    #     plot_row([u_out_vertex[0], u_out_vertex[1]], save_dir, f'u{k}', 
    #         same_range=False, plot_fn='imshow', cmap='jet')
    if save_pvd:
        vtkfile << (u_out, t)
    elif save_vector:
        u_out_vector = u_out.vector().get_local()
        np.save(save_dir + f'/u{k}_fenics_vec.npy', u_out_vector)
    

    # reduce FEniCS logging verbosity (30 = WARNING)
    df.set_log_level(30)
    tic = time.time()

    while t < T:

        t += dt
        k += 1
        df.solve(F == 0, u)
        u_old.assign(u)
        
        u_out = df.project(u, Vout)
        u_out.rename('u', 'u')

        if k % save_every == 0:
            u_out_vertex = u_out.compute_vertex_values(mesh_out).reshape(2, ngx_out, ngy_out)
            np.save(save_dir + f'/u{k}.npy', u_out_vertex)
            # if k % (10 * save_every) == 0 and plot:
            #     plot_row([u_out_vertex[0], u_out_vertex[1]], save_dir, f'u{k}', 
            #         same_range=False, plot_fn='imshow', cmap='jet')
        if save_pvd:
            vtkfile << (u_out, t)
        elif save_vector:
            u_out_vector = u_out.vector().get_local()
            np.save(save_dir + f'/u{k}_fenics_vec.npy', u_out_vector)

        print(f'Run {run}: solved {k} steps with total {time.time()-tic:.3f} seconds')

    return time.time() - tic
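
A hypothetical driver call (all parameter values illustrative, not taken from
the original script):

if __name__ == '__main__':
    elapsed = burgers2d(run=0, nu=0.005, ngx=65, ngy=65, dt=0.005, T=2.0,
                        ngx_out=64, ngy_out=64, save_dir='./burgers2d_runs',
                        save_every=4, save_pvd=False, save_vector=False,
                        plot=False, order=4)
    print(f'simulation finished in {elapsed:.1f}s')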
Ejemplo n.º 31
0
        activations = [Sigmoid] * args.nb_hidden_layers

    if args.hidden_type == "traditional":
        hidden_layer_types = [HiddenLayer] * args.nb_hidden_layers
    elif args.hidden_type == "residual":
        hidden_layer_types = [HiddenLayer] * (
            args.nb_hidden_layers - args.skip) + [ResidualHiddenLayer
                                                  ] * (args.skip)

    # P(\pi) PARAMS
    prior_alpha = 1.
    prior_beta = args.alpha0
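    # stick-breaking prior: stick proportions v_k ~ Beta(prior_alpha, prior_beta)
    # = Beta(1, alpha0); mixture weights are pi_k = v_k * prod_{j<k} (1 - v_j)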

    # DATA PARAMS
    # Create the datasets and experiments folders if needed.
    dataset_dir = mkdirs("./datasets")
    mkdirs(args.experiment_dir)

    dataset_name = None

    if args.dataset == 'mnist_plus_rot' or args.dataset == 'svhn_pca':
        dataset = pjoin(dataset_dir, args.dataset + ".pkl")
    else:
        dataset = pjoin(dataset_dir, args.dataset + ".npz")

    print "Datasets dir: {}".format(os.path.abspath(dataset_dir))
    print "Experiment dir: {}".format(os.path.abspath(args.experiment_dir))

    train_and_eval_stickBreaking_vae(dataset=dataset,
                                     hidden_layer_sizes=hidden_layer_sizes,
                                     hidden_layer_types=hidden_layer_types,
Ejemplo n.º 32
0
import os
import sys
from flask import Flask, request, jsonify
import json
import logging
import RPi.GPIO as GPIO
from aquarium import Aquarium
import pi_info

fpath = os.path.dirname(os.path.abspath(__file__))  # pi_service dir
sys.path.append(fpath + "/../")
from utils import utils

logpath = fpath + '/log'
utils.mkdirs(logpath)
logging.basicConfig(
    filename=logpath + '/info.log',
    filemode='w',
    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
    datefmt='%H:%M:%S',
    level=logging.INFO)

app = Flask(__name__, static_url_path='')


@app.route('/pi/info', methods=['GET'])
def get_pi_info():
    info = pi_info.getPiInfo()
    return jsonify(info)
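
The snippet ends before the server is launched; a typical (assumed) entry
point for this Flask app would be:

if __name__ == '__main__':
    # host and port are illustrative
    app.run(host='0.0.0.0', port=5000)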