Example #1
def evaluate(hps, generator, eval_loader, writer_eval):
    generator.eval()
    with torch.no_grad():
      for batch_idx, (spec, spec_lengths, y, y_lengths) in enumerate(eval_loader):

        spec, spec_lengths = spec.cuda(0), spec_lengths.cuda(0)
        y, y_lengths = y.cuda(0), y_lengths.cuda(0)


        # keep only the first sample of the first batch for logging
        spec = spec[:1]
        spec_lengths = spec_lengths[:1]
        y = y[:1]
        y_lengths = y_lengths[:1]
        break
      mel = spec_to_mel_torch(
          spec, 
          hps.data.filter_length, 
          hps.data.n_mel_channels, 
          hps.data.sampling_rate,
          hps.data.mel_fmin, 
          hps.data.mel_fmax)
      y_hat, mask, *_ = generator.module.infer(mel, spec_lengths, max_len=1000)
      y_hat_lengths = mask.sum([1,2]).long() * hps.data.hop_length

      y_hat_mel = mel_spectrogram_torch(
        y_hat.squeeze(1).float(),
        hps.data.filter_length,
        hps.data.n_mel_channels,
        hps.data.sampling_rate,
        hps.data.hop_length,
        hps.data.win_length,
        hps.data.mel_fmin,
        hps.data.mel_fmax
      )
    image_dict = {
      "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
    }
    audio_dict = {
      "gen/audio": y_hat[0,:,:y_hat_lengths[0]]
    }
    if global_step == 0:
      image_dict.update({"gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
      audio_dict.update({"gt/audio": y[0,:,:y_lengths[0]]})

    utils.summarize(
      writer=writer_eval,
      global_step=global_step, 
      images=image_dict,
      audios=audio_dict,
      audio_sampling_rate=hps.data.sampling_rate
    )
    generator.train()
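Note: `utils.summarize` in this and the following training examples is a thin logging wrapper around a TensorBoard `SummaryWriter`. A minimal sketch matching the keyword arguments used at the call sites on this page (the exact helper varies by project):

def summarize(writer, global_step, scalars=None, histograms=None,
              images=None, audios=None, audio_sampling_rate=22050):
    # writer is a torch.utils.tensorboard.SummaryWriter created by the caller.
    # Each optional dictionary is written to TensorBoard at the given step.
    for k, v in (scalars or {}).items():
        writer.add_scalar(k, v, global_step)
    for k, v in (histograms or {}).items():
        writer.add_histogram(k, v, global_step)
    for k, v in (images or {}).items():
        # plot_spectrogram_to_numpy / plot_alignment_to_numpy return HWC arrays
        writer.add_image(k, v, global_step, dataformats='HWC')
    for k, v in (audios or {}).items():
        writer.add_audio(k, v, global_step, sample_rate=audio_sampling_rate)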
Example #2
File: train.py Project: sam1373/glow-tts
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger,
             writer_eval):
    if rank == 0:
        global global_step
        generator.eval()
        losses_tot = []
        with torch.no_grad():
            for batch_idx, (x, x_lengths, y,
                            y_lengths) in enumerate(val_loader):
                x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(
                    rank, non_blocking=True)
                y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
                    rank, non_blocking=True)

                (z, y_m, y_logs,
                 logdet), attn, logw, logw_, x_m, x_logs = generator(x,
                                                                     x_lengths,
                                                                     y,
                                                                     y_lengths,
                                                                     gen=False)
                l_mle = 0.5 * math.log(2 * math.pi) + (
                    torch.sum(y_logs) +
                    0.5 * torch.sum(torch.exp(-2 * y_logs) *
                                    (z - y_m)**2) - torch.sum(logdet)
                ) / (torch.sum(y_lengths // hps.model.n_sqz) *
                     hps.model.n_sqz * hps.data.n_mel_channels)
                l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)
                loss_gs = [l_mle, l_length]
                loss_g = sum(loss_gs)

                if batch_idx == 0:
                    losses_tot = loss_gs
                else:
                    losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]

                if batch_idx % hps.train.log_interval == 0:
                    logger.info(
                        'Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                        format(epoch, batch_idx * len(x),
                               len(val_loader.dataset),
                               100. * batch_idx / len(val_loader),
                               loss_g.item()))
                    logger.info([x.item() for x in loss_gs])

        losses_tot = [x / len(val_loader) for x in losses_tot]
        loss_tot = sum(losses_tot)
        scalar_dict = {"loss/g/total": loss_tot}
        scalar_dict.update(
            {"loss/g/{}".format(i): v
             for i, v in enumerate(losses_tot)})
        utils.summarize(writer=writer_eval,
                        global_step=global_step,
                        scalars=scalar_dict)
        logger.info('====> Epoch: {}'.format(epoch))
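For reference, the hand-written `l_mle` above (also in Example #4) is the Glow-TTS maximum-likelihood loss: the negative log-likelihood of the latent `z` under the predicted Gaussian with mean `y_m` and log-scale `y_logs`, minus the flow's change-of-variables log-determinant, averaged over the D = sum(y_lengths // n_sqz) * n_sqz * n_mel_channels valid latent elements. Writing mu for `y_m`, s for `y_logs`, and log|det J| for `logdet`:

    L_mle = \frac{1}{2}\log(2\pi) + \frac{1}{D}\Big(\sum s + \frac{1}{2}\sum e^{-2s}(z-\mu)^2 - \sum \log\lvert\det J\rvert\Big)

`l_length` is simply the squared error between predicted and target log-durations, normalized by the total input length.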
Example #3
def isolated_obj_scenario(n, vis, output, debug):

    objects = YcbObjects(
        'objects/ycb_objects',
        mod_orn=['ChipsCan', 'MustardBottle', 'TomatoSoupCan'],
        mod_stiffness=['Strawberry'])
    data = IsolatedObjData(objects.obj_names, n, 'results')

    center_x, center_y = 0.05, -0.52
    network_path = 'network/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98'
    camera = Camera((center_x, center_y, 1.9), (center_x, center_y, 0.785),
                    0.2, 2.0, (224, 224), 40)
    env = Environment(camera, vis=vis, debug=debug)
    generator = GraspGenerator(network_path, camera, 5)

    for obj_name in objects.obj_names:
        print(obj_name)
        for _ in range(n):

            path, mod_orn, mod_stiffness = objects.get_obj_info(obj_name)
            env.load_isolated_obj(path, mod_orn, mod_stiffness)
            env.move_away_arm()

            rgb, depth, _ = camera.get_cam_img()
            grasps, save_name = generator.predict_grasp(rgb,
                                                        depth,
                                                        n_grasps=3,
                                                        show_output=output)
            for i, grasp in enumerate(grasps):
                data.add_try(obj_name)
                x, y, z, roll, opening_len, obj_height = grasp
                if vis:
                    debug_id = p.addUserDebugLine([x, y, z], [x, y, 1.2],
                                                  [0, 0, 1])

                succes_grasp, succes_target = env.grasp(
                    (x, y, z), roll, opening_len, obj_height)
                if vis:
                    p.removeUserDebugItem(debug_id)
                if succes_grasp:
                    data.add_succes_grasp(obj_name)
                if succes_target:
                    data.add_succes_target(obj_name)
                    if save_name is not None:
                        os.rename(save_name + '.png',
                                  save_name + f'_SUCCESS_grasp{i}.png')
                    break
                env.reset_all_obj()
            env.remove_all_obj()

    data.write_json()
    summarize(data.save_dir, n)
Example #4
File: train.py Project: dmc31a42/glow-tts
def train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer):
  train_loader.sampler.set_epoch(epoch)
  global global_step

  generator.train()
  for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
    x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
    y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)

    # Train Generator
    optimizer_g.zero_grad()
    
    (z, y_m, y_logs, logdet), attn, logw, logw_, x_m, x_logs = generator(x, x_lengths, y, y_lengths, gen=False)
    l_mle = 0.5 * math.log(2 * math.pi) + (torch.sum(y_logs) + 0.5 * torch.sum(torch.exp(-2 * y_logs) * (z - y_m)**2) - torch.sum(logdet)) / (torch.sum(y_lengths // hps.model.n_sqz) * hps.model.n_sqz * hps.data.n_mel_channels) 
    l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)

    loss_gs = [l_mle, l_length]
    loss_g = sum(loss_gs)

    if hps.train.fp16_run:
      with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:
        scaled_loss.backward()
      grad_norm = commons.clip_grad_value_(amp.master_params(optimizer_g._optim), 5)
    else:
      loss_g.backward()
      grad_norm = commons.clip_grad_value_(generator.parameters(), 5)
    optimizer_g.step()
    
    if rank==0:
      if batch_idx % hps.train.log_interval == 0:
        (y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)
        logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
          epoch, batch_idx * len(x), len(train_loader.dataset),
          100. * batch_idx / len(train_loader),
          loss_g.item()))
        logger.info([x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()])
        
        scalar_dict = {"loss/g/total": loss_g, "learning_rate": optimizer_g.get_lr(), "grad_norm": grad_norm}
        scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(loss_gs)})
        utils.summarize(
          writer=writer,
          global_step=global_step, 
          images={"y_org": utils.plot_spectrogram_to_numpy(y[0].data.cpu().numpy()), 
            "y_gen": utils.plot_spectrogram_to_numpy(y_gen[0].data.cpu().numpy()), 
            "attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()),
            },
          scalars=scalar_dict)
    global_step += 1
  
  if rank == 0:
    logger.info('====> Epoch: {}'.format(epoch))
Example #5
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger,
             writer_eval):
    if rank == 0:
        global global_step
        generator.eval()
        losses_tot = []
        with torch.no_grad():
            for batch_idx, (x, x_lengths, y,
                            y_lengths) in enumerate(val_loader):
                x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(
                    rank, non_blocking=True)
                y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(
                    rank, non_blocking=True)

                (z, z_m, z_logs, logdet,
                 z_mask), (x_m, x_logs, x_mask), (attn, logw,
                                                  logw_) = generator(x,
                                                                     x_lengths,
                                                                     y,
                                                                     y_lengths,
                                                                     gen=False)
                l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
                l_length = commons.duration_loss(logw, logw_, x_lengths)

                loss_gs = [l_mle, l_length]
                loss_g = sum(loss_gs)

                if batch_idx == 0:
                    losses_tot = loss_gs
                else:
                    losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]

                if batch_idx % hps.train.log_interval == 0:
                    logger.info(
                        'Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                        format(epoch, batch_idx * len(x),
                               len(val_loader.dataset),
                               100. * batch_idx / len(val_loader),
                               loss_g.item()))
                    logger.info([x.item() for x in loss_gs])

        losses_tot = [x / len(val_loader) for x in losses_tot]
        loss_tot = sum(losses_tot)
        scalar_dict = {"loss/g/total": loss_tot}
        scalar_dict.update(
            {"loss/g/{}".format(i): v
             for i, v in enumerate(losses_tot)})
        utils.summarize(writer=writer_eval,
                        global_step=global_step,
                        scalars=scalar_dict)
        logger.info('====> Epoch: {}'.format(epoch))
Example #6
 def __getitem__(self, tuple_of_arguments):
     filter_applied = tuple_of_arguments[0]
     funcs = tuple_of_arguments[1]
     
     old_return_fixed_indices = filter_applied.return_fixed_indices
     filter_applied.return_fixed_indices = True
     indices_that_exist, fixed_indices = filter_applied.apply(self._data)
     filter_applied.return_fixed_indices = old_return_fixed_indices
     
     column_names = tuple_of_arguments[2] if len(tuple_of_arguments) > 2 else None
     if column_names is None:
         column_names = list(self._data.columns.values)
     
     summaries = [summarize(self._data.loc[x[0]:x[1]][column_names], funcs) for
                  x in zip(fixed_indices[:-1], fixed_indices[1:])]
     
     summary = pd.concat(summaries, ignore_index=False)
     summary["End_Period"] = fixed_indices[:-1]
     summary["Start_Period"] = fixed_indices[1:]
     
     summary.set_index('Start_Period', inplace=True)
     
     if not isinstance(summary, type(self._data)):
         raise TypeError(
             f'Interval Call to DataProcessor should return type {type(self._data)} but returned {type(summary)}')
     
     # if one wishes to rename the column names that can be done through another __call__
     return DataProcessor(summary)
Example #7
def create_summary_by_zone(zones):
    """
    Creates summary file in zone folder
    Args:
        zones (iterable): Zone numbers
    """

    for zone in zones:
        zone_path, zone_file_prefix = get_zone_output_path(zone, data_path)
        df_avail, df_order, df_steer, df_cap = load_data_file_for_zone(
            zone, data_path)
        slots_observed, cslots_observed = get_slots_observed(
            zone_path,
            zone_file_prefix,
            df_avail,
            df_order.columns,
            check_existing=False)
        slots_active, cslots_active, slots_offered, cslots_offered = get_slots_active(
            zone_path, zone_file_prefix, df_order, slots_observed)
        summary = summarize(zone_path, zone_file_prefix, df_avail, df_order,
                            df_steer, df_cap, slots_offered, cslots_offered)
        summary = summary.reset_index()
        summary['EVENT_DTM'] = pd.to_datetime(summary['EVENT_DTM'])
        plot_arrivals(zone_path, zone_file_prefix, summary, '60min', 'mean',
                      'ALL')
Example #8
def group_sentences(grouping):
    analyzer = SentimentIntensityAnalyzer()
    sentences = []
    sources = set()
    for article in grouping:
        summarized = summarize(article.content, 15)
        processed_summarized = []
        for sentence in summarized:
            sources.add(article.source)
            processed_summarized.append((sentence, "source " + article.source))
        sentences.extend(processed_summarized)
    common = common_keywords(grouping)
    scoring = []
    for sentence in sentences:
        final_score = 0
        score = analyzer.polarity_scores(sentence[0])
        for word in common:
            if word in sentence[0]:
                final_score += abs(score["compound"] - common[word])
        scoring.append((sentence, final_score))
    scoring = sorted(scoring, key=lambda x: x[1])[0:10]
    scoring = [sentence[0] for sentence in scoring]
    final_output = []
    for sentence in sentences:
        if sentence in scoring:
            final_output.append(sentence)
    return final_output, sources
Example #9
	def __init__(self, content, source):
		self.content = content
		self.source = source
		self.sentences = split_sentences(content)
		self.vector = vectorize_text(content)
		self.word_scores = {}
		self.summary = summarize(self.content, 7)
		self.init_word_scores()
Example #10
def evaluate(hps, model, eval_loader, writer_eval):
    model.eval()
    with torch.no_grad():
        for batch_idx, (img, txt, mask_img, mask_txt, pos_r, pos_c,
                        pos_t) in enumerate(eval_loader):
            img = img.cuda(0, non_blocking=True)
            txt = txt.cuda(0, non_blocking=True)
            mask_img = mask_img.cuda(0, non_blocking=True)
            mask_txt = mask_txt.cuda(0, non_blocking=True)
            pos_r = pos_r.cuda(0, non_blocking=True)
            pos_c = pos_c.cuda(0, non_blocking=True)
            pos_t = pos_t.cuda(0, non_blocking=True)

            img_i = img[:, :-1]
            img_o = img[:, 1:]
            txt_i = txt[:, :-1]
            txt_o = txt[:, 1:]
            mask_img_i = mask_img[:, :-1]
            mask_img_o = mask_img[:, 1:]
            mask_txt_i = mask_txt[:, :-1]
            mask_txt_o = mask_txt[:, 1:, 0]

            with autocast(enabled=hps.train.fp16_run):
                logits_img, logits_txt = model(img_i, txt_i, mask_img_i,
                                               mask_txt_i, pos_r, pos_c, pos_t)
                with autocast(enabled=False):
                    loss_img = loss_fn_img(logits_img, img_o, mask_img_o)
                    loss_txt = loss_fn_txt(logits_txt, txt_o, mask_txt_o)
                    loss_tot = loss_img * hps.train.lamb + loss_txt
            break

    scalar_dict = {
        "loss/total": loss_tot.item(),
        "loss/img": loss_img.item(),
        "loss/txt": loss_txt.item()
    }

    utils.summarize(
        writer=writer_eval,
        global_step=global_step,
        scalars=scalar_dict,
    )
    model.train()
Example #11
def visualize(vectorizer, data=None):
    data = get_data(p.VAL_FILE,
                    vectorizer,
                    with_oov=p.POINTER_GEN,
                    aspect_file=p.ASPECT_FILE) if data is None else data
    model = new_model(vectorizer, data.dataset.aspects).eval()
    with open(p.MODEL_FILE, 'rb') as modelfile:
        model.load_state_dict(pkl.load(modelfile))
    batch = data[271:271 + p.DECODING_BATCH_SIZE]
    aspect_results = summarize(batch, model, beam_size=p.BEAM_SIZE)
    print_batch(batch, [r[0] for r in aspect_results], vectorizer,
                model.aspects)
    vis(p.VISUALIZATION_FILE,
        batch, [r[0] for r in aspect_results],
        vectorizer,
        model.aspects,
        0,
        0,
        pointer_gen=p.POINTER_GEN)
Example #12
File: views.py Project: foogunlana/project
def article(request, pk):
    cache_name = 'newscache:{}{}'.format('article', str(pk))
    icache = 'infinitecache:{}{}'.format('article', str(pk))

    response = cache.get(cache_name, None)
    if response:
        return response

    pk = int(pk)
    context = {}
    if request.method == 'GET':
        try:
            articles = mongo_calls('migrations')
            article = articles.find_one(
                {'article_id': pk},
                {'headline': 1, 'category': 1, 'writer': 1, 'summary': 1,
                 'keywords': 1, 'content': 1, 'photo': 1, 'posted': 1})
            aUri = HttpRequest.build_absolute_uri(request)
            try:
                article['posted'] = datetime.fromtimestamp(
                    article['posted'])
            except Exception:
                pass
            if article.get('summary'):
                article['par1'] = article['summary']
            else:
                article['par1'] = summarize(article)

            context = {'article': article, 'aUri': aUri}
            context['sUri'] = 'http://{}'.format(HttpRequest.get_host(request))

            response = render(request, 'news/article.html', context)
            cache.set(cache_name, response, 60*60*24)
            cache.set(icache, response, 60*60*24*14)
            return response
        except Exception:
            # Log exception
            response = cache.get(icache, None)
            return response

    context['sUri'] = 'http://{}'.format(HttpRequest.get_host(request))
    response = render(request, 'news/article.html', context)
    return response
Example #13
def compare(instance, 
            statistic,
            summary,
            nsim=50, 
            methods=[], 
            verbose=False,
            htmlfile=None,
            method_setup=True,
            csvfile=None,
            q=0.2):
    
    results = []
    
    run_CV = np.any([getattr(m, 'need_CV') for m in methods])

    for method in methods:
        if method_setup:
            method.setup(instance.feature_cov, instance)
        method.q = q

    method_params, class_names, method_names = get_method_params(methods)

    for i in range(nsim):

        X, Y, beta = instance.generate()

        # make a hash representing same data

        instance_hash = hashlib.md5()
        instance_hash.update(X.tobytes())
        instance_hash.update(Y.tobytes())
        instance_hash.update(beta.tobytes())
        instance_id = instance_hash.hexdigest()

        l_min, l_1se, l_theory, sigma_reid = gaussian_setup(X.copy(), Y.copy(), run_CV=run_CV)

        for method, method_name, class_name, idx in zip(methods, 
                                                        method_names,
                                                        class_names,
                                                        range(len(methods))):
            if verbose:
                print('method:', method)

            M, result_df = statistic(method, instance, X.copy(), Y.copy(), beta.copy(), l_theory.copy(), l_min, l_1se, sigma_reid)

            if result_df is not None:
                result_df['instance_id'] = instance_id
                result_df['method_param'] = str(method_params.loc[idx])
                result_df['model_target'] = M.model_target
                result_df['method_name'] = method_name
                result_df['class_name'] = class_name
                results.append(result_df)
            else:
                print('Result was empty.')

            if i > 0 and len(results) > 0:

                results_df = pd.concat(results)

                for p in instance.params.columns:
                    results_df[p] = instance.params[p][0]

                if csvfile is not None:
                    f = open(csvfile, 'w')
                    f.write(results_df.to_csv(index_label=False) + '\n')
                    f.close()

                summary_df = summarize('method_param',
                                       results_df,
                                       summary)

                for p in instance.params.columns:
                    summary_df[p] = instance.params[p][0]

                if htmlfile is not None:
                    f = open(htmlfile, 'w')
                    f.write(summary_df.to_html() + '\n')
                    f.write(instance.params.to_html())
                    f.close()

                    # also write a summary CSV

                    if csvfile is not None:
                        f = open(csvfile.replace('.csv', '_summary.csv'), 'w')
                        f.write(summary_df.to_csv(index_label=False) + '\n')
                        f.close()
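The `summarize` used here (and again in Example #19) is a pandas-style aggregator: it groups the results frame by 'method_param' (or a list of columns) and applies the caller-supplied `summary`. A plausible one-line sketch, assuming `summary` is a callable suitable for `groupby().apply()`:

import pandas as pd

def summarize(group_cols, df, summary):
    # Assumed behavior: aggregate each group of rows with the summary callable.
    return df.groupby(group_cols).apply(summary)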
Example #14
def train(sess, args, config):
    model_type = config.get('config', 'experiment')
    base_dir = os.path.expanduser(config.get('config', 'basedir'))
    tfrecord_dir = os.path.join(base_dir, config.get(model_type, 'tfrecord'))
    log_dir = os.path.join(base_dir, config.get('config', 'logdir'))
    adversarial_mode = config.get('config', 'mode')
    whether_noise = config.getboolean('generator', 'noise')
    t2s_task = config.getboolean('config', 't2s_task')
    noise_dim = config.getint('generator', 'noise_dim')

    source_only = config.getboolean('config', 'source_only')
    s2t_adversarial_weight = config.getfloat(model_type, 's2t_adversarial_weight')
    t2s_adversarial_weight = config.getfloat(model_type, 't2s_adversarial_weight')
    s2t_cyclic_weight = config.getfloat(model_type, 's2t_cyclic_weight')
    t2s_cyclic_weight = config.getfloat(model_type, 't2s_cyclic_weight')
    s2t_task_weight = config.getfloat(model_type, 'task_weight')
    t2s_task_weight = config.getfloat(model_type, 't2s_task_weight')
    s2t_style_weight = config.getfloat(model_type, 's2t_style_weight')
    t2s_style_weight = config.getfloat(model_type, 't2s_style_weight')
    discriminator_step = config.getint(model_type, 'discriminator_step')
    generator_step = config.getint(model_type, 'generator_step')
    save_dir = os.path.join(log_dir, utils.make_savedir(config))

    if args.delete and os.path.exists(save_dir):
        shutil.rmtree(save_dir)

    os.makedirs(save_dir, exist_ok=True)

    model_path = importlib.import_module(model_type)
    model = getattr(model_path, 'model')

    da_model = model(args, config)

    writer = tf.summary.FileWriter(save_dir, sess.graph)
    global_step = tf.train.get_or_create_global_step()

    get_batches = getattr(dataset_utils, model_type)

    tf.logging.info('Training %s with %s' % (model_type, adversarial_mode))
    
    if model_type == 'da_cil':
        with tf.name_scope(model_type + '_batches'):
            source_image_batch, source_label_batch, source_measure_batch, source_command_batch = get_batches('source', 'train', tfrecord_dir, batch_size=args.batch_size, config=config, args=args)
    
        if source_only:
            da_model(source_image_batch, None, source_measure_batch)
        else:       
            target_image_batch, _, _, _ = get_batches('target', 'train', tfrecord_dir, batch_size=args.batch_size, config=config, args=args)
            da_model(source_image_batch, target_image_batch, source_measure_batch)

        with tf.name_scope(model_type + '_objectives'):
            da_model.create_objective(source_label_batch, source_command_batch, adversarial_mode)

            if source_only:
                discriminator_loss = da_model.task_loss
                da_model.summary['discriminator_loss'] = discriminator_loss
            else:
                generator_loss = s2t_cyclic_weight * da_model.s2t_cyclic_loss + t2s_cyclic_weight * da_model.t2s_cyclic_loss + da_model.s2t_adversarial_loss[0] + da_model.t2s_adversarial_loss[0]
                generator_loss += s2t_style_weight * da_model.s2t_style_loss + t2s_style_weight * da_model.t2s_style_loss
                da_model.summary['generator_loss'] = generator_loss

                discriminator_loss = s2t_adversarial_weight * da_model.s2t_adversarial_loss[1] + t2s_adversarial_weight * da_model.t2s_adversarial_loss[1] + s2t_task_weight * da_model.task_loss 
                if t2s_task:
                    discriminator_loss += t2s_task_weight * da_model.t2s_task_loss 
                da_model.summary['discriminator_loss'] = discriminator_loss

    elif model_type == 'pixel_da':
        with tf.name_scope(model_type + '_batches'):
            source_image_batch, source_label_batch = get_batches('source', 'train', tfrecord_dir, batch_size=args.batch_size, config=config)
            mask_image_batch = source_image_batch[:,:,:,3]
            source_image_batch = source_image_batch[:,:,:,:3]
            if config.getboolean(model_type, 'input_mask'):
                tf.logging.info('Using masked input')
                mask_images = tf.to_float(tf.greater(mask_image_batch, 0.9))
                source_image_batch = tf.multiply(source_image_batch, tf.tile(tf.expand_dims(mask_images, 3), [1,1,1,3])) 
                
            # Label is already an 1-hot labels, but we expect categorical
            source_label_max_batch = tf.argmax(source_label_batch, 1)
            source_lateral_label_batch = (source_label_max_batch % 9) / 3
            source_head_label_batch = source_label_max_batch % 3
            
            target_image_batch, _ = get_batches('target', 'train', tfrecord_dir, batch_size=args.batch_size, config=config)

        da_model(source_image_batch, target_image_batch)

        with tf.name_scope(model_type + '_objectives'):
            da_model.create_objective(source_head_label_batch, source_lateral_label_batch, adversarial_mode)

            generator_loss = s2t_cyclic_weight * da_model.s2t_cyclic_loss + t2s_cyclic_weight * da_model.t2s_cyclic_loss + da_model.s2t_adversarial_loss[0] + da_model.t2s_adversarial_loss[0]
            generator_loss += s2t_style_weight * da_model.s2t_style_loss + t2s_style_weight * da_model.t2s_style_loss
            da_model.summary['generator_loss'] = generator_loss

            discriminator_loss = s2t_adversarial_weight * da_model.s2t_adversarial_loss[1] + t2s_adversarial_weight * da_model.t2s_adversarial_loss[1] + s2t_task_weight * da_model.transferred_task_loss 
            if t2s_task:
                discriminator_loss += t2s_task_weight * da_model.t2s_task_loss 
            da_model.summary['discriminator_loss'] = discriminator_loss

    else:
        raise Exception('Not supported model')

    with tf.name_scope('optimizer'):
        tf.logging.info('Getting optimizer')
        if args.lr_decay:
            decay_steps = config.getint('optimizer', 'decay_steps')
            decay_rate = config.getfloat('optimizer', 'decay_rate')
            learning_rate = tf.train.exponential_decay(args.learning_rate, global_step, decay_steps, decay_rate, staircase=True)
        else:
            learning_rate = args.learning_rate

        if not source_only:
            g_optimizer = _get_optimizer(config, args.optimizer)(learning_rate)
            g_optim = _gradient_clip(name='generator', optimizer=g_optimizer, loss=generator_loss, global_steps=global_step, clip_norm=args.clip_norm)
        d_optimizer = _get_optimizer(config, args.optimizer)(learning_rate)
        d_optim = _gradient_clip(name='discriminator', optimizer=d_optimizer, loss=discriminator_loss, global_steps=global_step, clip_norm=args.clip_norm)
    if not source_only:
        generator_summary, discriminator_summary = utils.summarize(da_model.summary, t2s_task) 
        utils.config_summary(save_dir, s2t_adversarial_weight, t2s_adversarial_weight, s2t_cyclic_weight, t2s_cyclic_weight, s2t_task_weight, t2s_task_weight, discriminator_step, generator_step, adversarial_mode, whether_noise, noise_dim, s2t_style_weight, t2s_style_weight)
    else:
        discriminator_summary = utils.summarize(da_model.summary, t2s_task, source_only)


    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))

    if args.load_ckpt:
        ckpt = tf.train.get_checkpoint_state(save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord)

    try:
        for iter_count in range(args.max_iter):
            # Update discriminator
            for disc_iter in range(discriminator_step):
                d_loss, _, steps = sess.run([discriminator_loss, d_optim, global_step])
                if not source_only and adversarial_mode == 'FISHER':
                    _, _ = sess.run([da_model.s2t_adversarial_loss[-1], da_model.t2s_adversarial_loss[-1]])
                tf.logging.info('Step %d: Discriminator loss=%.5f', steps, d_loss)

            if not source_only:
                for gen_iter in range(generator_step):
                    g_loss, _, steps = sess.run([generator_loss, g_optim, global_step])
                    tf.logging.info('Step %d: Generator loss=%.5f', steps, g_loss)
            
            if (iter_count+1) % args.save_interval == 0:
                saver.save(sess, os.path.join(save_dir, model_type), global_step=(iter_count+1))
                tf.logging.info('Checkpoint saved')

            if (iter_count+1) % args.summary_interval == 0:
                if not source_only:
                    disc_sum, gen_sum = sess.run([discriminator_summary, generator_summary])
                    writer.add_summary(gen_sum, steps)
                else:
                    disc_sum = sess.run(discriminator_summary)
                    
                writer.add_summary(disc_sum, steps)
                tf.logging.info('Summary at %d step' % (iter_count+1))

        
    except tf.errors.OutOfRangeError:
        print('Epoch limit reached')
    except KeyboardInterrupt:
        print('End training')
    finally:
        coord.request_stop()
        coord.join(threads)      
Example #15
def train_and_evaluate(rank, epoch, hps, model, optim, scaler, loaders,
                       writers):
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    model.train()
    for batch_idx, (img, txt, mask_img, mask_txt, pos_r, pos_c,
                    pos_t) in enumerate(train_loader):
        img = img.cuda(rank, non_blocking=True)
        txt = txt.cuda(rank, non_blocking=True)
        mask_img = mask_img.cuda(rank, non_blocking=True)
        mask_txt = mask_txt.cuda(rank, non_blocking=True)
        pos_r = pos_r.cuda(rank, non_blocking=True)
        pos_c = pos_c.cuda(rank, non_blocking=True)
        pos_t = pos_t.cuda(rank, non_blocking=True)

        img_i = img[:, :-1]
        img_o = img[:, 1:]
        txt_i = txt[:, :-1]
        txt_o = txt[:, 1:]
        mask_img_i = mask_img[:, :-1]
        mask_img_o = mask_img[:, 1:]
        mask_txt_i = mask_txt[:, :-1]
        mask_txt_o = mask_txt[:, 1:, 0]

        with autocast(enabled=hps.train.fp16_run):
            logits_img, logits_txt = model(img_i, txt_i, mask_img_i,
                                           mask_txt_i, pos_r, pos_c, pos_t)
            with autocast(enabled=False):
                loss_img = loss_fn_img(logits_img, img_o, mask_img_o)
                loss_txt = loss_fn_txt(logits_txt, txt_o, mask_txt_o)
                loss_tot = loss_img * hps.train.lamb + loss_txt
        optim.zero_grad()
        scaler.scale(loss_tot).backward()
        scaler.unscale_(optim)
        grad_norm = commons.grad_norm(model.parameters())
        scaler.step(optim)
        scaler.update()

        if rank == 0:
            num_tokens = mask_img.sum() + mask_txt.sum()
            if global_step % hps.train.log_interval == 0:
                lr = optim.param_groups[0]['lr']
                losses = [loss_tot, loss_img, loss_txt]
                print('Train Epoch: {} [{:.0f}%]'.format(
                    epoch, 100. * batch_idx / len(train_loader)))
                print([x.item() for x in losses] + [global_step, lr])

                scalar_dict = {
                    "loss/total": loss_tot,
                    "loss/img": loss_img,
                    "loss/txt": loss_txt
                }
                scalar_dict.update({
                    "learning_rate": lr,
                    "grad_norm": grad_norm,
                    "num_tokens": num_tokens
                })

                utils.summarize(writer=writer,
                                global_step=global_step,
                                scalars=scalar_dict)

            if global_step % hps.train.eval_interval == 0:
                print("START: EVAL")
                eval_loader.batch_sampler.set_epoch(global_step)
                evaluate(hps, model, eval_loader, writer_eval)
                utils.save_checkpoint(model, optim, hps.train.learning_rate,
                                      epoch,
                                      "model_{}.pth".format(global_step))
                print("END: EVAL")
        global_step += 1

    if rank == 0:
        print('====> Epoch: {}'.format(epoch))
Example #16
 def summary(self):
     return summarize(self.content, MAX_SUMMARY_LENGTH)
Example #17
 def summary(self):
     return utils.summarize(self.content, 50)
Example #18
import utils

print("Welcome to NewsNow!")
choice = 'y'
while choice.lower() == 'y':
    query = input("Enter query to search: ").lower()
    links = utils.getLinks(query, 5)
    articles = utils.getDocuments(links)
    merged_article = utils.merge(articles)
    summary = utils.summarize(merged_article)
    print(f"Latest on {query}:\n{summary}\n")
    choice = input("Search for another article? [y/n]: ")
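Unlike the TensorBoard helper sketched after Example #1, the `summarize` applied to text in Examples #8, #9, and #16-18 is an extractive summarizer taking `(text, n_sentences)`. A minimal frequency-based sketch (illustrative only; not any of these projects' actual code):

import re
from collections import Counter

def summarize(text, n_sentences=5):
    # Split into sentences, score each by the frequency of its words,
    # and return the top-scoring sentences in their original order.
    sentences = re.split(r'(?<=[.!?])\s+', text.strip())
    freq = Counter(re.findall(r'[a-z]+', text.lower()))

    def score(s):
        return sum(freq[w] for w in re.findall(r'[a-z]+', s.lower()))

    top = sorted(range(len(sentences)), key=lambda i: -score(sentences[i]))[:n_sentences]
    return [sentences[i] for i in sorted(top)]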
Example #19
        raise ValueError('one should be rho, the other signal')

    df = pd.read_csv(csvfile)
    
    if opts.feature in ['power', 'fdr', 'selection_quality', 'conditional_power']:
        summary = BH_summary # same as marginal_summary
    elif opts.feature == 'risk':
        summary = estimator_summary
    elif opts.feature in ['coverage', 'mean_length', 'median_length', 'naive_length', 'median_strong_length']:
        summary = interval_summary
    else:
        raise ValueError("don't know how to summarize '%s'" % opts.feature)

    summary_df = summarize(['method_param',
                            opts.param,
                            opts.fixed],
                           df,
                           summary)

    plot(summary_df,
         opts.fixed,
         opts.param,
         {'power':'Full Model Power', 
          'selection_quality':'Selection Quality',
          'fdr': 'Full Model FDR',
          'risk': 'Risk',
          'coverage': 'Coverage',
          'mean_length': 'Mean Length',
          'naive_length': 'Mean Naive Length',
          'median_strong_length': 'Median Strong Length',
          'conditional_power': 'Conditional Power',
Example #20
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
  net_g, net_d = nets
  optim_g, optim_d = optims
  scheduler_g, scheduler_d = schedulers
  train_loader, eval_loader = loaders
  if writers is not None:
    writer, writer_eval = writers

  train_loader.batch_sampler.set_epoch(epoch)
  global global_step

  net_g.train()
  net_d.train()
  for batch_idx, (spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
    spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
    y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)


    with autocast(enabled=hps.train.fp16_run):
      mel = spec_to_mel_torch(
          spec, 
          hps.data.filter_length, 
          hps.data.n_mel_channels, 
          hps.data.sampling_rate,
          hps.data.mel_fmin, 
          hps.data.mel_fmax)
      y_hat, ids_slice, x_mask, z_mask,\
      (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(mel, spec_lengths, spec, spec_lengths)

      y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
      y_hat_mel = mel_spectrogram_torch(
          y_hat.squeeze(1), 
          hps.data.filter_length, 
          hps.data.n_mel_channels, 
          hps.data.sampling_rate, 
          hps.data.hop_length, 
          hps.data.win_length, 
          hps.data.mel_fmin, 
          hps.data.mel_fmax
      )

      y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice 
      
      # Negative data augmentation (NDA): build a "jigsaw" negative by splitting
      # each ground-truth waveform into 4 chunks and shuffling them.
      batch_size = y.size(0)
      rand_idx = torch.randperm(4)
      y_jigsaw = y.view(batch_size, 4, -1)[:, rand_idx, :].view(batch_size, 1, -1)
      if (rand_idx == torch.arange(4)).all():
          # Identity permutation: fall back to the generated sample as the negative.
          y_jigsaw = y_hat

      y_negative = 0.75 * y_hat + 0.25 * y_jigsaw

      # Discriminator
      y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_negative.detach())
      with autocast(enabled=False):
        loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
        loss_disc_all = loss_disc
    optim_d.zero_grad()
    scaler.scale(loss_disc_all).backward()
    scaler.unscale_(optim_d)
    grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
    scaler.step(optim_d)

    with autocast(enabled=hps.train.fp16_run):
      # Generator
      y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
      with autocast(enabled=False):
        loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
        loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl

        loss_fm = feature_loss(fmap_r, fmap_g)
        loss_gen, losses_gen = generator_loss(y_d_hat_g)
        loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
    optim_g.zero_grad()
    scaler.scale(loss_gen_all).backward()
    scaler.unscale_(optim_g)
    grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
    scaler.step(optim_g)
    scaler.update()

    if rank==0:
      if global_step % hps.train.log_interval == 0:
        lr = optim_g.param_groups[0]['lr']
        losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
        logger.info('Train Epoch: {} [{:.0f}%]'.format(
          epoch,
          100. * batch_idx / len(train_loader)))
        logger.info([x.item() for x in losses] + [global_step, lr])
        
        scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
        scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl})

        scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
        scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
        scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
        image_dict = { 
            "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
            "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), 
            "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
        }
        utils.summarize(
          writer=writer,
          global_step=global_step, 
          images=image_dict,
          scalars=scalar_dict)

      if global_step % hps.train.eval_interval == 0:
        evaluate(hps, net_g, eval_loader, writer_eval)
        utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
        utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
    global_step += 1
  
  if rank == 0:
    logger.info('====> Epoch: {}'.format(epoch))
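Finally, `commons.clip_grad_value_` (Examples #4 and #20) appears to clamp gradients element-wise and return the pre-clipping global gradient norm for logging; passing `clip_value=None`, as Example #20 does, skips the clamping and only reports the norm. A sketch under that assumption:

import torch

def clip_grad_value_(parameters, clip_value, norm_type=2.0):
    # Clamp each gradient to [-clip_value, clip_value]; return the overall norm.
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    if clip_value is not None:
        clip_value = float(clip_value)
    total_norm = 0.0
    for p in parameters:
        total_norm += p.grad.data.norm(norm_type).item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total_norm ** (1.0 / norm_type)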