Example #1
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab = utils.load_pkl(args.vocab)

    logging.info("Initializing generation environment...")
    model = prepare_model(args, vocab)
    model = utils.to_device(model, devices)
    generator = Generator(
        model=model,
        device=devices[0],
        batch_size=args.batch_size,
        vocab=vocab,
        bos=args.bos,
        eos=args.eos,
        unk=args.unk,
        max_len=args.max_length
    )

    logging.info("Commencing generation...")
    samples = generator.generate(args.z_samples)
    if args.nearest_neighbors is not None:
        dataset = prepare_dataset(args, vocab)
        neighbors = nearest_neighbors(args, samples, dataset)
    else:
        neighbors = None
    save(args, samples, neighbors)

    logging.info("Done!")
Example #2
    def get_summary(self):
        res = []
        path_dir = self.mars_config.get_base_path()
        snap_times_list = os.listdir(path_dir)
        snap_times_list.sort(reverse=True)

        for snap_time in snap_times_list:
            try:
                time_stamp = int(snap_time)
                time_str = time.strftime('%Y-%m-%d %H:%M:%S',
                                         time.localtime(time_stamp))
                cur_summary = {
                    'time': time_str,
                    GROUPS_NAME: len(get_group(self.mars_config, snap_time)),
                    DEVICE_NAME: len(get_devices(self.mars_config, snap_time)),
                    DEVICE_CONFIG_NAME: len(get_devices_configs(self.mars_config, snap_time)),
                    HOSTS_NAME: len(get_host(self.mars_config, snap_time)),
                    LINKS_NAME: len(get_link(self.mars_config, snap_time)),
                    FLOW_NAME: len(get_flow(self.mars_config, snap_time)),
                }
                res.append(cur_summary)
            except ValueError:
                # skip directory names that are not Unix timestamps
                continue

        return res
Example #3
def main():
    # Setup argument parsing
    parser = argparse.ArgumentParser(description='Monitor bluetooth igrill devices, and export to MQTT')
    parser.add_argument('-c', '--config', action='store', dest='config_directory', default='.',
                        help='Set config directory, default: \'.\'')
    parser.add_argument('-l', '--log-level', action='store', dest='log_level', default='INFO',
                        help='Set log level, default: \'INFO\'')
    parser.add_argument('-d', '--log-destination', action='store', dest='log_destination', default='',
                        help='Set log destination (file), default: \'\' (stdout)')
    options = parser.parse_args()
    config = read_config(options.config_directory)

    # Setup logging
    log_setup(options.log_level, options.log_destination)

    # Get device list
    devices = get_devices(config['devices'])

    # Connect to MQTT
    client = mqtt_init(config['mqtt'])
    base_topic = config['mqtt']['base_topic']

    polling_interval = config.get('interval', 15)

    while True:
        for device in devices:
            publish(device.read_temperature(), device.read_battery(), client, base_topic, device.name)

        time.sleep(polling_interval)
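The loop above also depends on a `publish` helper that is not shown. A rough sketch assuming paho-mqtt and assuming `read_temperature()` returns a mapping of probe number to reading; the topic layout is hypothetical:

def publish(temperatures, battery, client, base_topic, device_name):
    # Hypothetical publisher: one MQTT topic per probe plus a battery topic.
    for probe, temperature in temperatures.items():
        client.publish(f"{base_topic}/{device_name}/probe{probe}", temperature)
    client.publish(f"{base_topic}/{device_name}/battery", battery)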
Example #4
def train(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    dataloader = create_dataloader(args)
    vocab = dataloader.dataset.vocab
    utils.save_pkl(vocab, os.path.join(args.save_dir, "vocab.pkl"))

    logging.info("Initializing training environment...")
    mdl = prepare_model(args, dataloader)
    optimizer_cls = get_optimizer_cls(args)
    trainer = Trainer(model=utils.to_device(mdl, devices),
                      device=devices[0],
                      vocab=vocab,
                      epochs=args.epochs,
                      save_dir=args.save_dir,
                      save_period=args.save_period,
                      optimizer_cls=optimizer_cls,
                      tensor_key="tensor",
                      samples=args.samples,
                      show_progress=args.show_progress,
                      kld_annealing=args.kld_annealing,
                      dynamic_rnn=mdl.encoder.rnn.dynamic
                      or mdl.decoder.rnn.dynamic)
    report_model(trainer)

    logging.info("Commecing training...")
    trainer.train(dataloader)

    logging.info("Done!")
Example #5
def initialize(param_runDir, cuda_id):

    runtime_path = Path(param_runDir).resolve()

    logger.handlers.clear()
    # set logger
    log_file = f"demo_log.log"
    logger.set_log_to_stream()
    logger.set_log_to_file(runtime_path.joinpath(log_file))

    # print versions after logger.set_log_to_file() to log them into file
    print_versions()
    #logger.info(f"runtime commit: {get_commit()}")
    logger.info(f"runtime path: {runtime_path}")

    random_seed = 20  # for PyTorch 1.2.0 + DenseNet121
    logger.info(f"random seed for reproducibility: {random_seed}")
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # check gpu device
    device = get_devices(cuda_id)

    return runtime_path, device
Example #6
def check_distributed(args):
    devices = get_devices(args.cuda)
    if args.local_rank is None:
        return False, devices[0]
    else:
        device = devices[args.local_rank % len(devices)]
        torch.cuda.set_device(device)
        logger.info(f"waiting other ranks ...")
        dist.init_process_group(backend="nccl", init_method="env://")
        logger.info(f"set world_size to {dist.get_world_size()}")
        return True, device
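For context, the `(is_distributed, device)` pair returned here would typically be consumed by moving the model to the device and, in the distributed case, wrapping it in DistributedDataParallel. A sketch of such a consumer; none of this appears in the original snippet:

from torch.nn.parallel import DistributedDataParallel

def setup_model(model, args):
    # Hypothetical consumer of check_distributed().
    is_distributed, device = check_distributed(args)
    model = model.to(device)
    if is_distributed:
        # With the NCCL backend, device_ids must name exactly the local device.
        model = DistributedDataParallel(model, device_ids=[device.index])
    return model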
Example #7
    def generate_commands(self, rebalance=True, meta=None):
        commands = []
        ring_disks = get_devices(self.zones, metadata=meta)

        for ringtype in RING_TYPES:
            builder_present = os.path.exists("%s/%s.builder" %
                                             (self.workspace, ringtype))
            if not builder_present:
                commands.append(self._ring_create_command(ringtype))

            for zone, devices in ring_disks[ringtype].items():
                for device in devices:
                    port = self.ports[ringtype]
                    weight = device['weight']
                    disk = device['device']
                    node = device['ip']
                    metadata = device['metadata']
                    # When the rings are not present or the device does not
                    # exist in the ring, add it to the ring.
                    # Otherwise, if the weight is to be set to 0, remove
                    # the device, or else just update the weight.
                    if not builder_present or \
                       not self._is_devpresent(ringtype, zone, node,
                                               port, disk):
                        cmd = self._ring_add_command(ringtype, zone,
                                                     node, port,
                                                     disk, metadata,
                                                     weight)
                    else:
                        if int(weight) == 0:
                            cmd = self._ring_remove_command(ringtype,
                                                            zone, node,
                                                            port, disk)
                        else:
                            # Always set the weight of the device.
                            # Verified that setting the weight to the same
                            # value doesn't cause partitions to reassign.
                            cmd = self._ring_setweight_command(ringtype,
                                                               zone,
                                                               node,
                                                               port,
                                                               disk,
                                                               weight)
                    commands.append(cmd)
            if rebalance:
                commands.append(self._ring_rebalance_command(ringtype))
        return commands
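The `_ring_*_command` helpers are not shown. Judging by the stock swift-ring-builder CLI, the strings they build would look roughly like the following; the exact formats here are assumptions, not taken from this code:

# Rough shapes of the generated commands (placeholder values):
example_commands = [
    "swift-ring-builder object.builder create 18 3 1",
    "swift-ring-builder object.builder add z1-10.0.0.1:6000/sdb1_meta 100.0",
    "swift-ring-builder object.builder set_weight z1-10.0.0.1:6000/sdb1 0",
    "swift-ring-builder object.builder remove z1-10.0.0.1:6000/sdb1",
    "swift-ring-builder object.builder rebalance",
]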
Example #8
def main():
    apk = sys.argv[1]
    package = sys.argv[2]
    instructions_file = sys.argv[3]
    print(instructions_file)
    print('Checking adb devices')
    devices = get_devices()
    print('Devices found: '+str(devices))
    if not devices:
        print('No devices found, setting up emulator')
        run_emulator()
        print('Emulator boot completed.. proceeding..')

    device = MonkeyRunner.waitForConnection()
    easy_device = EasyMonkeyDevice(device)
    print('Connected\nInstalling package..')
    device.installPackage(apk)
    print('Installed!')
    print('Checking all activities..\nThis may take a while..')
    f = open(instructions_file, 'r')
    instructions = json.load(f)
    for activity in instructions:
        print(activity)
        runComponent = package + '/' + activity
        for button in instructions[activity]:
            device.startActivity(component=runComponent)
            time.sleep(1)
            if easy_device.visible(By.id('id/'+button)):
                easy_device.touch(By.id('id/'+button), MonkeyDevice.DOWN_AND_UP)
                time.sleep(1)
            else:
                device.press("KEYCODE_BACK", MonkeyDevice.DOWN_AND_UP)
                time.sleep(1)
                if easy_device.visible(By.id('id/'+button)):
                    easy_device.touch(By.id('id/'+button), MonkeyDevice.DOWN_AND_UP)

        result = device.takeSnapshot()
        result_path = os.path.join(os.path.abspath('monkeyresult/'), package)
        if not os.path.exists(result_path):
            os.makedirs(result_path)
        result.writeToFile(
            os.path.join(result_path, activity+'.png'),
            'png'
            )
    f.close()
    print('Saved some snapshots to\n'+result_path)
Example #10
def predict(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [getattr(args, f"{mode}_vocab") for mode in MODES]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    test_dataset = prepare_dataset(args, vocabs[0])
    test_dataloader = td.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size,
        num_workers=args.data_workers,
        collate_fn=dataset.TextSequenceBatchCollator(
            pad_idxs=[len(v) for v in vocabs]))

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model = utils.to_device(model, devices)
    predictor = Predictor(
        model=model,
        device=devices[0],
        batch_size=args.batch_size,
        sent_vocab=vocabs[0],
        label_vocab=vocabs[1],
        intent_vocab=vocabs[2],
        bos=args.bos,
        eos=args.eos,
        unk=args.unk,
        beam_size=args.beam_size,
    )

    logging.info("Commencing prediction...")
    with torch.no_grad():
        (labels, intents), (pl, pi) = predictor.predict(test_dataloader)
    report_stats(args, labels, intents, pl, pi)
    save(args, labels, intents, pl, pi)

    logging.info("Done!")
Example #11
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [getattr(args, f"{mode}_vocab") for mode in MODES]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    test_dataset = prepare_dataset(args, vocabs[0])
    test_dataloader = td.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size,
        num_workers=args.data_workers,
        collate_fn=dataset.TextSequenceBatchCollator(
            pad_idxs=[len(v) for v in vocabs]))

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model.beam_size = args.beam_size
    model = utils.to_device(model, devices)
    predictor = PredictorWithProgress(model=model,
                                      device=devices[0],
                                      vocabs=vocabs,
                                      progress=args.show_progress,
                                      bos=args.bos,
                                      eos=args.eos,
                                      unk=args.unk,
                                      topk=args.top_k)

    logging.info("Commencing prediction...")
    with torch.no_grad():
        (labels, pl), (intents, pi) = predictor.predict(test_dataloader)
    labels, intents = [l[0] for l in labels], [i[0] for i in intents]
    pl, pi = [p[0] for p in pl], [p[0] for p in pi]
    report_stats(args, labels, intents, pl, pi)
    save(args, labels, intents, pl, pi)

    logging.info("Done!")
Example #12
    def __init__(self, run_callback):
        super().__init__()
        self.left = 200
        self.top = 200
        self.width = 400
        self.height = 120
        self.run_callback = run_callback
        self.custom_state_rows = []

        self.input_dropdown = None
        self.output_dropdown = None
        self.playback_dropdown = None

        self.devices = get_devices()
        self.layout = QFormLayout()

        self.selected_devices = {
            "input_device_index": None,
            "output_device_index": None,
            "playback_device_index": None
        }

        self.init_ui()
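Here `get_devices` enumerates audio endpoints for the three dropdowns. One plausible implementation, assuming the sounddevice library; the original helper is not shown, so this is only a sketch:

import sounddevice as sd

def get_devices():
    # Hypothetical enumerator: index plus channel counts, so the dialog can
    # decide which dropdowns (input/output/playback) each device fits.
    return [
        {
            "index": index,
            "name": dev["name"],
            "max_input_channels": dev["max_input_channels"],
            "max_output_channels": dev["max_output_channels"],
        }
        for index, dev in enumerate(sd.query_devices())
    ]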
Example #13
    def __init__(self, config, train_loader, valid_loader, test_loader):
        """
        Construct a new Trainer instance.

        Args
        ----
        - config: object containing command line arguments.
        - train_loader / valid_loader / test_loader: data iterators.
        """

        # DATA PARAMS
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader

        self.test_script = config.test_script
        if self.test_script:
            try:
                if config.train:
                    self.train_loader = DataLoader(
                        self.train_loader.sampler.data_source.
                        data[:250 * config.batch_size],
                        batch_size=config.batch_size)

                self.valid_loader = DataLoader(
                    self.valid_loader.sampler.data_source.
                    data[:3 * config.batch_size],
                    batch_size=config.batch_size)
            except AttributeError:
                if config.train:
                    self.train_loader = DataLoader(
                        self.train_loader.sampler.data_source.dataset.
                        data[:10 * config.batch_size],
                        batch_size=config.batch_size)

                self.valid_loader = DataLoader(
                    self.valid_loader.sampler.data_source.dataset.
                    data[:3 * config.batch_size],
                    batch_size=config.batch_size)

            self.test_loader = self.valid_loader

        self.num_train = len(self.train_loader.dataset) if train_loader else 0
        self.num_valid = len(self.valid_loader.dataset) if valid_loader else 0
        self.num_test = len(self.test_loader.dataset) if test_loader else 0
        self.num_classes = config.num_classes
        _batch = next(iter(self.test_loader))
        self.input_size = utils.infer_input_size(_batch)

        # TRAINING PARAMS
        self.epochs = config.epochs
        self.start_epoch = 0
        self.momentum = config.momentum
        self.lr = config.init_lr
        self.weight_decay = config.weight_decay
        self.nesterov = config.nesterov
        self.gamma = config.gamma
        self.lr_step = config.lr_step
        self.lambda_a = config.lambda_a
        self.lambda_b = config.lambda_b
        self.temperature = config.temperature
        self.scale_dml = config.scale_dml

        # MISCELLANEOUS PARAMS
        self.model_num = len(config.model_names)
        self.counter = 0
        self.use_wandb = config.use_wandb
        self.use_sweep = config.use_sweep
        self.progress_bar = config.progress_bar
        self.experiment_level = config.experiment_level
        self.experiment_name = config.experiment_name
        self.level_name = config.level_name
        self.unlabelled = bool(config.unlabel_split)
        self.discard_unlabelled = config.discard_unlabelled

        # MODELS AND MODEL ATTRIBUTES
        self.model_names = config.model_names
        self.indexed_model_names = [
            f'({i})_{model_name}'
            for i, model_name in enumerate(self.model_names, 1)
        ]
        self.use_gpu = (not config.disable_cuda and torch.cuda.is_available())
        self.devices = utils.get_devices(self.model_num, self.use_gpu)
        self.nets = utils.model_init_and_placement(self.model_names,
                                                   self.devices,
                                                   self.input_size,
                                                   self.num_classes)

        # LOSS FUNCTIONS
        self.loss_kl = nn.KLDivLoss(reduction='batchmean')
        self.loss_ce = nn.CrossEntropyLoss(reduction='none')

        # KEEP TRACK OF THE VALIDATION ACCURACY
        self.best_valid_accs = [-0.1] * self.model_num
        self.best_mean_valid_acc = -0.1

        # LEARNING SIGNAL CONDITIONS AND FRACTIONS
        # if lambda_b = 1. (which it always should be for the first level),
        # the kd part is disabled and should therefore not be logged
        self.kd_condition = all(
            [bool(1 - self.lambda_b), self.experiment_level > 1])
        self.kd_fraction = (1 - self.lambda_b) * self.lambda_a

        # deep mutual learning can only be done on a set of models;
        # dml is therefore disabled for model_num <= 1 (e.g. for a kd experiment)
        self.dml_condition = any(
            [self.model_num >= 2,
             bool(self.lambda_b),
             bool(self.lambda_a)])
        self.dml_fraction = self.lambda_a * self.lambda_b

        # if both previous conditions are False, the sl_signal is equal to the
        # overall loss, so we do not need the sl_signal explicitly
        self.sl_condition = any([self.dml_condition, self.kd_condition])
        self.sl_fraction = 1 - self.lambda_a

        # CONFIGURE WEIGHTS & BIASES LOGGING AND SAVE DIR
        self.experiment_dir = utils.prepare_dirs(self.experiment_name,
                                                 self.level_name)
        if self.use_wandb:
            wandb.init(name=self.level_name,
                       project=self.experiment_name,
                       dir=self.experiment_dir,
                       config=config,
                       id=self.level_name,
                       tags=list(set(config.model_names)))
            wandb.log({
                'sl fraction': self.sl_fraction,
                'kd fraction': self.kd_fraction,
                'dml fraction': self.dml_fraction
            })
            if self.use_sweep:
                _config = wandb.config
                for param in config.hp_search:
                    print(param, getattr(_config, param))
                    setattr(self, param, getattr(_config, param))

        # INITIALIZE OPTIMIZER & SCHEDULER AND LOG THE MODEL DESCRIPTIONS
        model_stats = []
        self.optimizers = []
        self.schedulers = []
        for i, net in enumerate(self.nets):

            optimizer = torch.optim.SGD(net.parameters(),
                                        lr=self.lr,
                                        momentum=self.momentum,
                                        weight_decay=self.weight_decay,
                                        nesterov=self.nesterov)

            self.optimizers.append(optimizer)

            # set learning rate decay
            scheduler = torch.optim.lr_scheduler.StepLR(self.optimizers[i],
                                                        step_size=self.lr_step,
                                                        gamma=self.gamma,
                                                        last_epoch=-1)

            self.schedulers.append(scheduler)

            model_name = self.model_names[i]
            architecture, size_indicator = utils.infer_model_desc(model_name)
            params = sum([p.data.nelement() for p in net.parameters()])
            print('[*] Number of parameters of {} model: {:,}'.format(
                model_name, params))
            model_stats.append([
                model_name, architecture, size_indicator,
                f'{params:,}'.replace(',', '.')
            ])
        if self.use_wandb:
            wandb.log({
                "examples":
                wandb.Table(data=model_stats,
                            columns=[
                                "Model name", "Architecture", "Size indicator",
                                "# params"
                            ])
            })
Example #14
    def __init__(self,
                 dataset,
                 num_classes,
                 batch_size,
                 train,
                 model_num,
                 teachers=[],
                 unlabel_split=.0,
                 use_gpu=False,
                 progress_bar=True):

        self.data = []
        self.teacher_num = len(teachers)

        devices = get_devices(model_num, use_gpu)

        if unlabel_split:
            len_ds = len(dataset)
            all_indices = range(len_ds)
            subset_indices = set(random.sample(
                all_indices, int(len_ds * unlabel_split)))
            counter = 0

        if progress_bar:
            pbar = tqdm(total=len(dataset), smoothing=.005)

        if teachers:
            accuracies = []
            # prepare the teachers and assert their behaviour
            _batch = dataset[0][0].unsqueeze(0)
            for teacher, device in zip(teachers, devices):
                teacher = teacher.to(device)
                batch = _batch.clone().to(device)
                teacher.eval()
                sample_classes = teacher(batch).shape[1]
                assert sample_classes == num_classes, f"Num classes of the output is {sample_classes}, {num_classes} required"
                accuracies.append(RunningAverageMeter())

            # create the pseudo labels
            with torch.no_grad():
                for image, label in dataset:
                    unlabelled = 1
                    if unlabel_split:
                        if counter in subset_indices:
                            unlabelled = 0
                        counter += 1
                    _pseudo_labels = []
                    for i, (teacher, device) in enumerate(zip(teachers, devices)):
                        if use_gpu:
                            image = image.to(device)
                        # add dimension to comply with the desired input dimension (batch of single image)
                        pred = teacher(image.unsqueeze(0)).cpu()
                        _pseudo_labels.append(pred)

                        # keep track of the accuracy of the teacher model
                        acc_at_1 = accuracy(
                            pred,
                            torch.tensor([[label]]),
                            topk=(1,))[0]
                        accuracies[i].update(acc_at_1.item())

                    image = image.cpu()
                    pseudo_labels = torch.stack(_pseudo_labels, -1).squeeze(0)
                    self.data.append((image, label, pseudo_labels, unlabelled))

                    if progress_bar:
                        pbar.update(1)

            if progress_bar:
                pbar.close()

            print(
                f"Accuracies of the loaded models are {', '.join([str(round(acc.avg, 2)) + '%' for acc in accuracies])}, respectively")

        else:
            dummy_pseudo_label = torch.empty(num_classes, model_num)
            for image, label in dataset:
                unlabelled = 1
                if unlabel_split:
                    if counter in subset_indices:
                        unlabelled = 0
                    counter += 1
                self.data.append(
                    (image, label, dummy_pseudo_label, unlabelled)
                )
                if progress_bar:
                    pbar.update(1)

            if progress_bar:
                pbar.close()
Example #15
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [args.word_vocab, args.label_vocab, args.intent_vocab]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    dataloader = None

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model = utils.to_device(model, devices)
    encoder = encode.Encoder(model=model,
                             device=devices[0],
                             batch_size=args.batch_size)
    generator = Generator(model=model,
                          device=devices[0],
                          batch_size=args.batch_size,
                          sent_vocab=vocabs[0],
                          label_vocab=vocabs[1],
                          intent_vocab=vocabs[2],
                          bos=args.bos,
                          eos=args.eos,
                          unk=args.unk,
                          max_len=args.max_length,
                          beam_size=args.beam_size,
                          beam_topk=args.beam_sample_topk,
                          validate=args.validate)

    logging.info("Commencing generation...")
    if args.generation_type in {"posterior", "uniform"}:
        if dataloader is None:
            dataloader = create_dataloader(args, vocabs)
    sampler = utils.map_val(
        args.generation_type,
        {
            "gaussian": lambda: None,
            "posterior": lambda: MultivariateGaussianMixtureSampler(
                *encoder.encode(dataloader),
                scale=args.posterior_sampling_scale),
            "uniform": lambda: UniformNoiseSampler(
                encoder.encode(dataloader)[0],
                pa=args.uniform_sampling_pa,
                pm=args.uniform_sampling_pm),
        },
        name="sampler")()
    with torch.no_grad():
        gens, probs = generator.generate(args.samples, sampler)
    if args.nearest_neighbors is not None:
        if dataloader is None:
            dataloader = create_dataloader(args, vocabs)
        sents = [data["string"][0] for data in dataloader.dataset]
        searcher = neighbor.PyTorchPCASearcher(
            pca_dim=100,
            sents=sents,
            num_neighbors=args.nearest_neighbors,
            batch_size=args.nearest_neighbors_batch_size,
            device=devices[0])
        neighbors = searcher.search(gens[0])
    else:
        neighbors = None
    report_stats(args, gens[0], neighbors)
    save(args, gens, probs, neighbors)

    logging.info("Done!")
Example #16
def graph(args):
    
    flnA, flnB, flnC = args['inputA'], args['inputB'], args['inputC']
    typeA, wA = args['pA'][0], float(args['pA'][1])
    typeB, wB = args['pB'][0], float(args['pB'][1])
    if flnC is not None: typeC, wC = args['pC'][0], args['pC'][1]
    
    inputA, sr = librosa.load(args['audio_path']+flnA, sr=args['sr'], mono=True)
    inputB, sr = librosa.load(args['audio_path']+flnB, sr=args['sr'], mono=True)

    if flnC is not None: 
        inputC, sr = librosa.load(args['audio_path']+flnC, sr=args['sr'], mono=True)
    else: 
        inputC = None

    # segment beats
    proc = Segmentation(args,inputA,flnA,inputB,flnB,inputC,flnC)    
    inputA, inputB, inputC, xA_seg, xB_seg, xC_seg, x_seg, beat_channels = proc.get_segments()
    
    if len(x_seg[0]) // args['hoplen'] < args['k_h']:
        args['k_h'] = int(round(len(x_seg[0]) // args['hoplen']))
    
    n_channels = args['nfft'] // 2 + 1
    kernel_seg = utils.weight_fn( (1, args['k_h'], n_channels, args['n_filters_stft']))
    kernel_odf = utils.weight_fn( (1, 1, n_channels, args['n_filters_odf']) )
    
    dft_real_kernels, dft_imag_kernels = utils.get_stft_kernels(args['nfft'])
    
    # allocate cpu/gpu for compute
    config, devices, input_dev, seg_dev, odf_dev, output_dev = utils.get_devices()
        
    segcount = 0
    result, res = [], []
    for xA_beat_channel, xB_beat_channel, xC_beat_channel, x_beat_channel in beat_channels[:args['num_beat_segs']]:

        seglen = min(len(xA_beat_channel), len(xB_beat_channel),
                     len(xC_beat_channel), len(x_beat_channel))
        print('Transforming segment {}/{}'.format(segcount, len(beat_channels)))
        segcount += 1
        
        xA_beat_channel = xA_beat_channel[:seglen]
        xB_beat_channel = xB_beat_channel[:seglen]
        xC_beat_channel = xC_beat_channel[:seglen]
        x_beat_channel = x_beat_channel[:seglen]
        
        # initialize segments
        g=tf.Graph()
        with g.as_default(), g.device(input_dev), tf.Session(config=config) as sess:
            beta_stft = tf.placeholder(tf.float32,shape=(),name='beta_stft')
            
            dft_real_kernels_tf = tf.constant(dft_real_kernels, name="dft_real_kernels", dtype='float32')
            dft_imag_kernels_tf = tf.constant(dft_imag_kernels, name="dft_imag_kernels", dtype='float32')
            
            # input A
            xA_seg_raw = np.ascontiguousarray(xA_beat_channel[None,:,None,None])
            xA_seg_raw = tf.constant(xA_seg_raw, name='xA_seg_raw', dtype='float32')
            _, xA_seg_mag, xA_seg = utils.get_logmagnitude_STFT(xA_seg_raw, dft_real_kernels_tf, dft_imag_kernels_tf, args['hoplen'])
            
            # input B
            xB_seg_raw = np.ascontiguousarray(xB_beat_channel[None,:,None,None])
            xB_seg_raw = tf.constant(xB_seg_raw, name='xB_seg_raw', dtype='float32')
            _, xB_seg_mag, xB_seg = utils.get_logmagnitude_STFT(xB_seg_raw, dft_real_kernels_tf, dft_imag_kernels_tf, args['hoplen'])
            
            # input C
            xC_seg_raw = np.ascontiguousarray(xC_beat_channel[None,:,None,None])
            xC_seg_raw = tf.constant(xC_seg_raw, name='xC_seg_raw', dtype='float32')
            _, xC_seg_mag, xC_seg = utils.get_logmagnitude_STFT(xC_seg_raw, dft_real_kernels_tf, dft_imag_kernels_tf, args['hoplen'])
            
            # optimizable variable x
            x_beat_channel = np.ascontiguousarray(x_beat_channel[None,:,None,None]).astype(np.float32)
            x_seg_raw = tf.Variable(x_beat_channel,name='x_seg_raw')
            _, x_seg_mag, x_seg = utils.get_logmagnitude_STFT(x_seg_raw, dft_real_kernels_tf, dft_imag_kernels_tf, args['hoplen'])

            # compute features
            with g.device(seg_dev):
                
                kernel_seg_tf = tf.constant(kernel_seg, name='kernel', dtype='float32')
                
                # optimizable var net
                conv = tf.nn.conv2d(x_seg,kernel_seg_tf,padding="VALID",strides=[1,1,1,1],name="conv_x_")
                net = utils.selu(conv)
                
                # net A
                segA_conv = tf.nn.conv2d(xA_seg,kernel_seg_tf,padding="VALID",strides=[1,1,1,1],name="convA_"+typeA)
                segA_net = utils.selu(segA_conv)
                
                if typeA == 'style':
                    gramA, style_gramA = compute_gram(net, segA_net)
                    segA_loss = compute_style_loss(gramA, style_gramA)
                elif typeA == 'content':
                    segA_loss = compute_content_loss(net, segA_net)
                
                # net B
                seg_B_conv =tf.nn.conv2d(xB_seg,kernel_seg_tf,padding='VALID',strides=[1,1,1,1],name='convB_'+typeB)            
                segB_net = utils.selu(seg_B_conv)
                
                if typeB == 'style':
                    gramB, style_gramB = compute_gram(net, segB_net)
                    segB_loss = compute_style_loss(gramB, style_gramB)
                elif typeB == 'content':
                    segB_loss = compute_content_loss(net, segB_net)
                
                # net C
                if inputC is not None:
                    segC_conv=tf.nn.conv2d(xC_seg,kernel_seg_tf,padding="VALID",strides=[1,1,1,1],name="convC_"+typeC)
                    segC_net = utils.selu(segC_conv)
                    
                    if typeC == 'style':
                        gramC, style_gramC = compute_gram(net, segC_net)
                        segC_loss = compute_style_loss(gramC, style_gramC)
                    elif typeC == 'content':
                        segC_loss = compute_content_loss(net, segC_net)
                else:
                    segC_loss = None
    
                grads_segA = tf.gradients(segA_loss, x_seg_raw)[0]
                norm_segA = tf.norm(grads_segA)
    
                grads_segB = tf.gradients(segB_loss, x_seg_raw)[0]
                norm_segB = tf.norm(grads_segB)
    
                if inputC is not None:
                    grads_segC = tf.gradients(segC_loss, x_seg_raw)[0]
                    norm_segC = tf.norm(grads_segC)
              
            if args['mode'] == 'ODF':
                # cosine distance loss from spectral difference envelopes
                with g.device(odf_dev):
                    sdif_odf_loss = tf.ones(())
                    odf_conv = tf.nn.conv2d(x_seg,kernel_odf,padding="VALID",strides=[1,1,1,1],name="conv_"+'odf')
                    odf_net = utils.selu(odf_conv)
                    
                    x_odf, x_sdif = compute_sdif_odf(odf_net)

                    if args['target_odf_pattern'] == 'A':
                        _odf, _spec = compute_sdif_odf(xA_seg)
                    elif args['target_odf_pattern'] == 'B':
                        _odf, _spec = compute_sdif_odf(xB_seg)
                    elif args['target_odf_pattern'] == 'C':
                        _odf, _spec = compute_sdif_odf(xC_seg)
                    else:
                        _odf, _spec = compute_sdif_odf(xA_seg)

                    sdif_odf_loss = tf.losses.cosine_distance(x_odf, _odf, dim=0)
            else:
                sdif_odf_loss = 0
                
            # compute outputs
            with g.device(output_dev):
                total_inputA_loss = 0
                total_inputB_loss = 0
                total_inputC_loss = 0
                total_odf_loss = sdif_odf_loss
                total_inputA_loss = wA * segA_loss
                total_inputB_loss = wB * segB_loss
                if inputC is not None: total_inputC_loss = float(wC) * segC_loss
                
                total_loss = total_inputA_loss + total_inputB_loss + total_inputC_loss + total_odf_loss
                
                with tf.Session(config=config) as sess:    
                    sess.run(tf.global_variables_initializer())
                    feed_dict = {beta_stft: 1.0}
                    
                    segA = norm_segA.eval(feed_dict=feed_dict)
                    segB = norm_segB.eval(feed_dict=feed_dict)
                    
                    if inputC is not None: segC = norm_segC.eval(feed_dict=feed_dict)
                    # normalize the loss gradient
                    beta_vals = segA / segB
                    if inputC is not None:
                        beta_vals = abs(segA + segB + segC) / 3.0
                    feed_dict[beta_stft] = beta_vals
                    
                    # train optimizer
                    opt = tf.contrib.opt.ScipyOptimizerInterface(
                        total_loss, var_list=[x_seg_raw], method='L-BFGS-B',
                        options={'maxiter': args['iterations'],
                                 'ftol': args['factor'] * np.finfo(float).eps,
                                 'gtol': args['factor'] * np.finfo(float).eps},
                        tol=args['factor'])
                    opt.minimize(sess, feed_dict=feed_dict)
                    res = x_seg_raw.eval()
                    print('Segment loss:', total_loss.eval(feed_dict=feed_dict))
            
        result.append(res.squeeze())
    return result
Example #17
            if opt.plot_online:
                line.set_data(dataQ[:, 0].detach().cpu().numpy(),
                              dataQ[:, 1].detach().cpu().numpy())
                plt.pause(0.01)

    return dataQ, collQ, coll_mmd


if __name__ == "__main__":
    FILENAME = opt.expDirName + '/' + opt.expName + '_'
    FILENAME += opt.src.split('/')[-1][:3] + '-' + opt.target.split(
        '/')[-1][:3]

    device, num_gpus = get_devices(
        "cuda:0" if not opt.no_cuda and torch.cuda.is_available() else "cpu",
        seed=opt.seed)

    # Load data
    dataQ0 = load_data(opt.src, opt.nPointsMax_src).to(device)
    dataP, wP = load_data_weights(opt.target, opt.target_weights,
                                  opt.nPointsMax_target)
    dataP = dataP.to(device)
    if wP is not None:
        wP = wP.to(device)

    n_samples, n_features = dataQ0.shape

    print(opt)

    # Discriminator
Example #18
from airqo_batch_insert import get_device_measurements
from config import configuration
from kcca_batch_insert import ProcessMeasurements
from utils import get_devices, filter_valid_devices, filter_valid_kcca_devices

if __name__ == "__main__":

    tenant = configuration.TENANT
    if not tenant:
        print("Tenant not specified")
        exit()

    if tenant.strip().lower() == "airqo":
        airqo_devices = get_devices(configuration.AIRQO_BASE_URL, "airqo")
        filtered_devices = filter_valid_devices(airqo_devices)

        if len(filtered_devices) > 0:
            get_device_measurements(filtered_devices)
        else:
            print("No valid devices")

    elif tenant.strip().lower() == "kcca":
        kcca_devices = get_devices(configuration.AIRQO_BASE_URL, "kcca")
        filtered_devices = filter_valid_kcca_devices(kcca_devices)
        if len(filtered_devices) > 0:
            process_measurements = ProcessMeasurements(filtered_devices)
            process_measurements.begin_fetch()
        else:
            print("No valid devices")
    else:
        print("Error", "Invalid Tenant", sep=" : ")
Example #19
	def page3_prepare(self):
		self.page3_ls.clear()
		for dev,_type in utils.get_devices():
			self.page3_ls.append([False, dev, _type, False, ""])
Example #20
        default=None,
        type=str,
        help="use GPUs with its device ids, separated by commas")
    parser.add_argument('--model',
                        default=None,
                        type=str,
                        help="pretrained model to be used in prediction")
    parser.add_argument('input_csv',
                        type=str,
                        help="input csv filepath for test")
    parser.add_argument('output_csv',
                        type=str,
                        help="output csv filepath for test")
    args = parser.parse_args()

    device = get_devices(args.cuda)[0]
    print_versions()

    mode = Mode.PER_STUDY
    LABELS = [
        'Atelectasis', 'Cardiomegaly', 'Consolidation', 'Edema',
        'Pleural Effusion'
    ]
    model_path = Path(args.model).resolve()
    #model_path = Path("train_20190526_per_study/model_epoch_030.pth.tar").resolve()
    env = PredictEnvironment(5, device, mode, model_path=model_path)
    p = Predictor(env)

    input_path = Path(args.input_csv).resolve()
    entries = load_manifest(input_path, mode)
    base_path = input_path.parent
Example #21
def train(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    dataloader = create_dataloader(args)
    vocabs = dataloader.dataset.vocabs
    if args.validate:
        val_dataloader = create_dataloader(args, vocabs, True)
    else:
        val_dataloader = None
    fnames = [f"{mode}.vocab" for mode in MODES]
    for vocab, fname in zip(vocabs, fnames):
        utils.save_pkl(vocab, os.path.join(args.save_dir, fname))

    logging.info("Initializing training environment...")
    resume_from = dict()
    if args.resume_from is not None:
        resume_from = torch.load(args.resume_from)
    mdl = prepare_model(args, vocabs, resume_from)
    mdl = utils.to_device(mdl, devices)
    optimizer_cls = get_optimizer_cls(args)
    validator = None
    if args.validate:
        validator = Validator(
            model=mdl,
            device=devices[0],
            vocabs=vocabs,
            bos=args.bos,
            eos=args.eos,
            unk=args.unk,
            alpha=args.loss_alpha,
            beta=args.loss_beta,
            progress=args.show_progress,
            batch_stats=args.log_stats
        )
    trainer = Trainer(
        model=mdl,
        model_path=args.model_path,
        alpha=args.loss_alpha,
        beta=args.loss_beta,
        device=devices[0],
        vocabs=vocabs,
        epochs=args.epochs,
        save_dir=args.save_dir,
        save_period=args.save_period,
        optimizer_cls=optimizer_cls,
        samples=args.samples,
        tensorboard=args.tensorboard,
        progress=args.show_progress,
        validator=validator,
        batch_stats=args.log_stats,
        early_stop=args.early_stop,
        early_stop_criterion=args.early_stop_criterion,
        early_stop_patience=args.early_stop_patience
    )
    trainer.load_snapshot(resume_from)
    report_model(trainer)

    logging.info("Commencing training joint-lu...")
    trainer.train(dataloader, val_dataloader)

    logging.info("Done!")