Example #1
    def __init__(self, opt, model, optimizer, start_iter, best_result=None):
        self.opt = opt
        self.model = model.cuda()
        self.optimizer = optimizer
        self.scheduler = get_schedular(optimizer, self.opt)
        self.criterion = get_criteria(self.opt)

        self.output_directory = utils.get_save_path(self.opt)
        self.best_txt = os.path.join(self.output_directory, 'best.txt')
        self.logger = utils.get_logger(self.output_directory)
        opt.write_config(self.output_directory)

        self.st_iter, self.ed_iter = start_iter, self.opt.max_iter

        # data loader
        from dataloaders import create_loader
        self.train_loader = create_loader(self.opt, mode='train')
        self.eval_loader = create_loader(self.opt, mode='val')

        if best_result:
            self.best_result = best_result
        else:
            self.best_result = Result()
            self.best_result.set_to_worst()

        # train parameters
        self.iter_save = len(self.train_loader)
        self.train_meter = AverageMeter()
        self.eval_meter = AverageMeter()
        self.metric = self.best_result.absrel
        self.result = Result()
Example #2
def main():
    """Defining the main function"""
    logger = utils.get_logger(os.environ['DEBUG'])

    utils.db_connection()
    utils.get_news()

    logger.info('Started')

    while True:
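        # check for new items, then sleep 30 seconds before polling again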
        news.notify_news()
        time.sleep(30)
Example #3
def run(args):
    start = time.time()
    logger = get_logger(
            os.path.join(args.checkpoint, 'separate.log'), file=True)
    
    dataset = Dataset(mix_scp=args.mix_scp, ref_scp=args.ref_scp, aux_scp=args.aux_scp)
    
    # Load model
    nnet_conf = load_json(args.checkpoint, "mdl.json")
    nnet = ConvTasNet(**nnet_conf)
    cpt_fname = os.path.join(args.checkpoint, "best.pt.tar")
    cpt = th.load(cpt_fname, map_location="cpu")
    nnet.load_state_dict(cpt["model_state_dict"]) 
    logger.info("Load checkpoint from {}, epoch {:d}".format(
        cpt_fname, cpt["epoch"]))
    
    device = th.device(
        "cuda:{}".format(args.gpuid)) if args.gpuid >= 0 else th.device("cpu")
    nnet = nnet.to(device) if args.gpuid >= 0 else nnet
    nnet.eval()
    
    with th.no_grad():
        total_cnt = 0
        for i, data in enumerate(dataset):    
            mix = th.tensor(data['mix'], dtype=th.float32, device=device)
            aux = th.tensor(data['aux'], dtype=th.float32, device=device) 
            aux_len = th.tensor(data['aux_len'], dtype=th.float32, device=device)
            key = data['key']
            
            # Forward            
            ests = nnet(mix, aux, aux_len)
            ests = ests.cpu().numpy()
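            # peak (max-abs) level of the input mixture, used to rescale the estimate below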
            norm = np.linalg.norm(mix.cpu().numpy(), np.inf)
            ests = ests[:mix.shape[-1]]
            
            # for each utterance
            logger.info("Separate Utt{:d}".format(total_cnt + 1))
            # rescale the estimate to the mixture's peak level
            ests = ests * norm / np.max(np.abs(ests))
            write_wav(os.path.join(args.dump_dir, key),
                      ests,
                      fs=args.fs)
            total_cnt += 1
    
    end = time.time()
    logger.info('Utt={:d} | Time Elapsed: {:.1f}s'.format(total_cnt, end-start))
Example #4
    def __init__(self, opt, model, optimizer, start_iter, best_result=None):
        self.opt = opt

        self.model = DataParallelModel(model).float().cuda()
        self.optimizer = optimizer
        self.scheduler = get_schedular(optimizer, self.opt)

        self.criterion = DataParallelCriterion(get_criteria(self.opt)).cuda()
        self.evaluation = DataParallelEvaluation(EvaluationModule()).cuda()

        self.output_directory = utils.get_save_path(self.opt)
        self.best_txt = os.path.join(self.output_directory, 'best.txt')
        self.logger = utils.get_logger(self.output_directory)
        opt.write_config(self.output_directory)

        self.st_iter, self.ed_iter = start_iter, self.opt.max_iter

        self.train_loader = create_loader(self.opt, mode='train')
        self.eval_loader = create_loader(self.opt, mode='val')

        if best_result:
            self.best_result = best_result
        else:
            self.best_result = Result()
            self.best_result.set_to_worst()

        # train parameters
        # self.iter_save = len(self.train_loader)
        self.iter_save = 50
        self.train_meter = AverageMeter()
        self.eval_meter = AverageMeter()
        self.metric = self.best_result.absrel
        self.result = Result()

        # batch size per GPU
        self.ebt = self.opt.batch_size // torch.cuda.device_count()
Example #5
#!/usr/bin/env python
# wujian@2018

import argparse

import numpy as np

from libs.data_handler import NumpyReader, NumpyWriter, parse_scps
from libs.utils import get_logger
from libs.opts import StrToBoolAction

logger = get_logger(__name__)


def run(args):
    numpy_reader = NumpyReader(args.npy_scp)

    spk2utt = parse_scps(args.spk2utt, num_tokens=-1) if args.spk2utt else None

    with NumpyWriter(args.dump_dir, args.scp) as writer:
        if spk2utt is None:
            for key, mat in numpy_reader:
                if mat.ndim != 2:
                    raise RuntimeError(
                        "--spk2utt is None, so input ndarray must be 2D, got {:d}"
                        .format(mat.ndim))
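                # L2-normalize each row before averaging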
                if args.normalize:
                    mat = mat / np.linalg.norm(
                        mat, ord=2, axis=1, keepdims=True)
                writer.write(key, np.mean(mat, axis=0))
            logger.info("Processed {:d} speakers".format(len(numpy_reader)))
Example #6
    def __init__(self,
                 nnet,
                 checkpoint="checkpoint",
                 optimizer="sgd",
                 gpuid=None,
                 optimizer_kwargs=None,
                 clip_norm=None,
                 min_lr=0,
                 patience=0,
                 factor=0.5,
                 logging_period=1000,
                 resume=None):
        if not th.cuda.is_available():
            raise RuntimeError("CUDA device unavailable...exist")
        if not isinstance(gpuid, tuple):
            gpuid = (gpuid, )
        self.device = th.device("cuda:{}".format(gpuid[0]))
        self.gpuid = gpuid
        if checkpoint and not os.path.exists(checkpoint):
            os.makedirs(checkpoint)
        self.checkpoint = checkpoint
        self.logger = get_logger(os.path.join(checkpoint, "trainer.log"),
                                 file=True)

        self.clip_norm = clip_norm
        self.logging_period = logging_period
        self.cur_epoch = 0  # zero based

        if resume:
            if not os.path.exists(resume):
                raise FileNotFoundError(
                    "Could not find resume checkpoint: {}".format(resume))
            cpt = th.load(resume, map_location="cpu")
            self.cur_epoch = cpt["epoch"]
            self.logger.info("Resume from checkpoint {}: epoch {:d}".format(
                resume, self.cur_epoch))
            # load nnet
            nnet.load_state_dict(cpt["model_state_dict"])
            self.nnet = nnet.to(self.device)
            # load ge2e
            ge2e_loss = GE2ELoss()
            ge2e_loss.load_state_dict(cpt["ge2e_state_dict"])
            self.ge2e = ge2e_loss.to(self.device)
            self.optimizer = self.create_optimizer(
                optimizer, optimizer_kwargs, state=cpt["optim_state_dict"])
        else:
            self.nnet = nnet.to(self.device)
            ge2e_loss = GE2ELoss()
            self.ge2e = ge2e_loss.to(self.device)
            self.optimizer = self.create_optimizer(optimizer, optimizer_kwargs)
        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           mode="min",
                                           factor=factor,
                                           patience=patience,
                                           min_lr=min_lr,
                                           verbose=True)
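        # total parameter count, in millions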
        self.num_params = sum(
            [param.nelement() for param in nnet.parameters()]) / 10.0**6

        # logging
        self.logger.info("Model summary:\n{}".format(nnet))
        self.logger.info("Loading model to GPUs:{}, #param: {:.2f}M".format(
            gpuid, self.num_params))
        if clip_norm:
            self.logger.info(
                "Gradient clipping by {}, default L2".format(clip_norm))
Example #7
    elif not opt.no_eval:
        evaluator = get_evaluator(opt, student)
        evaluate(evaluator, logger=logger)


if __name__ == '__main__':
    opt = opts().parse()

    print("RUNDIR: {}".format(opt.run.save_path))
    if not os.path.exists(opt.run.save_path):
        os.makedirs(opt.run.save_path)
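    # keep a copy of the config file alongside the run outputs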
    shutil.copy(opt.config_file, opt.run.save_path)

    # Setup logger
    writer = SummaryWriter(log_dir=opt.run.save_path)
    logger = get_logger(opt.run.save_path)
    log_str = "Starting Experiment {}".format(opt.run.exp_id)
    print(log_str)
    logger.info(log_str)

    # Setup seeds
    torch.manual_seed(opt.run.manual_seed)
    torch.cuda.manual_seed(opt.run.manual_seed)
    np.random.seed(opt.run.manual_seed)
    random.seed(opt.run.manual_seed)

    # Setup device
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        [str(gpu) for gpu in opt.run.gpus])
    if len(opt.run.gpus) > 0:
        cudnn.benchmark = True
Example #8
def evaluate(args):
    start = time.time()
    total_SISNR = 0
    total_SDR = 0
    total_cnt = 0

    # build the logger object
    logger = get_logger(os.path.join(args.checkpoint, 'eval.log'), file=True)

    # Load model
    nnet_conf = load_json(args.checkpoint, "mdl.json")
    nnet = ConvTasNet(**nnet_conf)
    cpt_fname = os.path.join(args.checkpoint, "best.pt.tar")
    cpt = th.load(cpt_fname, map_location="cpu")
    nnet.load_state_dict(cpt["model_state_dict"])
    logger.info("Load checkpoint from {}, epoch {:d}".format(
        cpt_fname, cpt["epoch"]))

    device = th.device("cuda:{}".format(
        args.gpuid)) if args.gpuid >= 0 else th.device("cpu")
    nnet = nnet.to(device) if args.gpuid >= 0 else nnet
    nnet.eval()

    # Load data
    dataset = Dataset(mix_scp=args.mix_scp,
                      ref_scp=args.ref_scp,
                      aux_scp=args.aux_scp)

    with th.no_grad():
        for i, data in enumerate(dataset):
            mix1 = th.tensor(data['mix1'], dtype=th.float32, device=device)
            mix2 = th.tensor(data['mix2'], dtype=th.float32, device=device)
            aux = th.tensor(data['aux'], dtype=th.float32, device=device)
            aux_len = th.tensor(data['aux_len'],
                                dtype=th.float32,
                                device=device)

            # Forward
            ref = data['ref']
            ests, _ = nnet(mix1, mix2, aux, aux_len)
            ests = ests.cpu().numpy()
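            # trim estimate and reference to a common length before scoring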
            if ests.size != ref.size:
                end = min(ests.size, ref.size)
                ests = ests[:end]
                ref = ref[:end]

            # for each utterance
            # Compute SDRi
            if args.cal_sdr:
                SDR, sir, sar, popt = bss_eval_sources(ref, ests)
                # avg_SDRi = cal_SDRi(src_ref, src_est, mix)
                total_SDR += SDR[0]
            # Compute SI-SNR
            SISNR = cal_SISNR(ests, ref)
            if args.cal_sdr:
                logger.info("Utt={:d} | SDR={:.2f} | SI-SNR={:.2f}".format(
                    total_cnt + 1, SDR[0], SISNR))
            else:
                logger.info("Utt={:d} | SI-SNR={:.2f}".format(
                    total_cnt + 1, SISNR))
            total_SISNR += SISNR
            total_cnt += 1
    end = time.time()

    logger.info('Time Elapsed: {:.1f}s'.format(end - start))
    if args.cal_sdr:
        logger.info("Average SDR: {0:.2f}".format(total_SDR / total_cnt))
    logger.info("Average SI-SNR: {:.2f}".format(total_SISNR / total_cnt))
Example #9
#!/usr/bin/env python3

# NOTE: this example requires PyAudio because it uses the Microphone class
import subprocess
import time

import speech_recognition as sr
from gtts import gTTS
import pygame

from libs import utils

log = utils.get_logger(__name__)
r = sr.Recognizer()

pygame.mixer.init()


def speech_to_text(timeout=15, phrase_time_limit=10):
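    """Listen on the default microphone and return recognized text, or False on timeout."""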
    with sr.Microphone() as source:
        try:
            audio = r.listen(source,
                             timeout=timeout,
                             phrase_time_limit=phrase_time_limit)
        except sr.WaitTimeoutError:
            log.info('Timed out')
            return False

        try:
            return r.recognize_google(audio)
Example #10
    if CHECK_LATEST_CODEQL_CLI:
        setup_script_args += ' --check-latest-cli'
    if CHECK_LATEST_QUERIES:
        setup_script_args += ' --check-latest-queries'
    if PRECOMPILE_QUERIES:
        setup_script_args += ' --precompile-latest-queries'

    run_result = check_output_wrapper(
        f"{scripts_dir}/setup.py {setup_script_args}",
        shell=True).decode("utf-8")

    # what command did the user ask to run?
    if not CODEQL_CLI_ARGS or not CODEQL_CLI_ARGS.strip():
        # nothing to do
        logger.info(
            "No argument passed in for codeql-cli, nothing to do. To perform some task, please set the CODEQL_CLI_ARGS environment variable to a valid argument..."
        )
    else:
        codeql = CodeQL(CODEQL_HOME)
        run_result = codeql.execute_codeql_command(CODEQL_CLI_ARGS)
        print(run_result)

    if WAIT_AFTER_EXEC:
        logger.info("Wait forever specified, waiting...")
        while True:
            sleep(10)


logger = get_logger()
main()