Example #1
# Imports assumed by this snippet (not shown in the original excerpt);
# `metric`, `postproc`, and `Plotter` are project-local modules.
import os
import random
from typing import Tuple

import numpy as np
import torch
import tqdm
from torch import Tensor


def validate_model(
    net: torch.nn.Module,
    dataloader: torch.utils.data.DataLoader,
    classes: list,
    update: int,
    device: torch.device,
    plotter: Plotter,
    loss_fn=torch.nn.BCELoss()) -> Tuple[Tensor, dict, dict]:
    """
    Validate current model on validation set and return loss and metrics
    """
    plots_path = os.path.join('results', 'intermediate', 'plots')
    metrics_path = os.path.join('results', 'intermediate', 'metrics')
    os.makedirs(plots_path, exist_ok=True)
    os.makedirs(metrics_path, exist_ok=True)

    loss = torch.tensor(0., device=device)
    with torch.no_grad():
        target_list = []
        prediction_list = []
        # calculate targets and predictions on validation set
        for data in tqdm.tqdm(dataloader, desc='scoring', position=0):
            inputs, targets, _, idx = data
            inputs = inputs.to(device, dtype=torch.float32)
            targets = targets.to(device, dtype=torch.float32)
            predictions = net(inputs)
            loss += loss_fn(predictions, targets)
            # collect targets and predictions for plotting and metrics
            target_array = targets.detach().cpu().numpy()
            prediction_array = predictions.detach().cpu().numpy()
            target_list.extend(target_array)
            prediction_list.extend(prediction_array)
        loss /= len(dataloader)
        # pick some random excerpts and plot them
        num_plots = 4
        indices = random.sample(range(len(target_list)), k=num_plots)
        targets = np.stack([target_list[i] for i in indices])
        predictions = np.stack([prediction_list[i] for i in indices])
        plotter.plot(targets, predictions, plots_path, update)
        # compute dcase metrics
        targets = np.stack(target_list)
        predictions = np.stack(prediction_list)
        metrics = metric.compute_dcase_metrics([targets], [predictions],
                                               classes)
        metrics_pp = metric.compute_dcase_metrics(
            [targets], [postproc.post_process_predictions(predictions)],
            classes)
        metric.write_dcase_metrics_to_file(metrics, metrics_path,
                                           f"{update:07d}.txt")
        metric.write_dcase_metrics_to_file(metrics_pp, metrics_path,
                                           f"{update:07d}_pp.txt")
    return loss, metrics, metrics_pp
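The DCASE metrics above are computed twice, once on raw network outputs and once after `postproc.post_process_predictions`. That module is not part of this excerpt; a minimal sketch of what such a step might look like, assuming frame-wise sigmoid outputs of shape (excerpts, classes, time) that are binarized and then median-filtered over time (the function name, threshold, and filter length are all assumptions):

import numpy as np
from scipy.ndimage import median_filter

def post_process_predictions(predictions, threshold=0.5, filter_length=9):
    """Hypothetical: binarize sigmoid outputs, then smooth over time."""
    binarized = (predictions > threshold).astype(np.float32)
    # median-filter along the last (time) axis to drop spurious short events
    size = (1,) * (binarized.ndim - 1) + (filter_length,)
    return median_filter(binarized, size=size)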
Example #2
def main(args):
    global stayingAlive
    # monkey-patch ssl.wrap_socket (sslwrap presumably injects default arguments)
    ssl.wrap_socket = sslwrap(ssl.wrap_socket)
    lines = [(1, "Connections"), (2, "roundtrip")]
    p = Plotter(lines, '%s' % args)
    t = Tester(args, p)
    t.start()

    p.runMe()
    stayingAlive = False
    t.join()
Example #3
def train(env):
    value_net = Critic(1290, 128, 256, params['critic_weight_init']).to(device)
    policy_net = Actor(1290, 128, 256, params['actor_weight_init']).to(device)
    target_value_net = Critic(1290, 128, 256).to(device)
    target_policy_net = Actor(1290, 128, 256).to(device)

    # Switching off dropout layers
    target_value_net.eval()
    target_policy_net.eval()

    softUpdate(value_net, target_value_net, soft_tau=1.0)
    softUpdate(policy_net, target_policy_net, soft_tau=1.0)

    value_optimizer = optimizer.Ranger(value_net.parameters(),
                                       lr=params['value_lr'],
                                       weight_decay=1e-2)
    policy_optimizer = optimizer.Ranger(policy_net.parameters(),
                                        lr=params['policy_lr'],
                                        weight_decay=1e-5)
    value_criterion = nn.MSELoss()
    loss = {
        'test': {
            'value': [],
            'policy': [],
            'step': []
        },
        'train': {
            'value': [],
            'policy': [],
            'step': []
        }
    }

    plotter = Plotter(
        loss,
        [['value', 'policy']],
    )

    step = 0
    plot_every = 10
    for epoch in range(100):
        print("Epoch: {}".format(epoch + 1))
        for batch in env.train_dataloader:
            (loss, value_net, policy_net, target_value_net, target_policy_net,
             value_optimizer, policy_optimizer) = ddpg(
                 value_net, policy_net, target_value_net, target_policy_net,
                 value_optimizer, policy_optimizer, batch, params, step=step)
            # print(loss)
            plotter.log_losses(loss)
            step += 1
            if step % plot_every == 0:
                print('step', step)
                test_loss = run_tests(env, step, value_net, policy_net,
                                      target_value_net, target_policy_net,
                                      value_optimizer, policy_optimizer, plotter)
                plotter.log_losses(test_loss, test=True)
                plotter.plot_loss()
            if step > 1500:
                # crude hard stop after 1500 updates (raises AssertionError)
                assert False
Example #4
    def plot(self):
        self.image_count = 0
        QtGui.QPixmapCache.clear()  # clear qt image cache
        self.stop_button.setEnabled(True)
        self.plot_button.setEnabled(False)
        self.animate_button.setEnabled(False)

        # send dates in decimal format to worker
        start_date = self.start_year.value() + (1 + self.start_month.currentIndex() * 2) / 24
        end_date = self.end_year.value() + (1 + self.end_month.currentIndex() * 2) / 24

        self.worker = Plotter(start_date, end_date, self.plot_step, self.color_map, self)
        self.worker.image_increment_signal.connect(self.add_image)
        self.worker.finished.connect(self.del_worker)
        self.worker.status_signal.connect(self.set_status)

        self.worker.start()
Example #5
    def process_step(self, action, add_to_plotter=True):
        """
        Take the action a certain number of times (Parameters.FRAME_SKIPPING)
        as described in the article
        Return the environment state after taking the action x times
        """

        lives_before_action = self.lives
        self.take_action(action)
        self.episode_score += self.reward
        if self.lives < lives_before_action:
            self.reward += Parameters.NEGATIVE_REWARD
            self.terminal = True
            if add_to_plotter:
                Plotter.add_episode_score(self.episode_score)
            self.episode_score = 0

        return (self.state, self.reward, self.terminal)
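`take_action` is not part of this excerpt; a minimal sketch of what it might do under frame skipping, assuming a Gym-style emulator on `self.env` and that `Parameters.FRAME_SKIPPING` holds the repeat count (lives bookkeeping is elided; all of this is an assumption, not the project's actual code):

    def take_action(self, action):
        """Hypothetical: repeat `action` for FRAME_SKIPPING emulator steps."""
        self.reward = 0
        for _ in range(Parameters.FRAME_SKIPPING):
            state, reward, terminal, _ = self.env.step(action)  # one emulator step
            self.reward += reward      # accumulate reward across skipped frames
            self.state = state
            self.terminal = terminal
            if terminal:               # stop early if the episode ended
                break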
Example #6
    def save_session(self):
        time_at_start_save = time.time()
        sys.stdout.write(
            '{}: [Step {}k  --  Took {:3.2f} s] '.format(
                datetime.now(),
                self.step // 1000,
                time_at_start_save - self.last_time))
        self.last_time = time_at_start_save

        Parameters.add_attr("CURRENT_STEP", self.step)
        a = time.time()
        Parameters.update()
        b = time.time()
        sys.stdout.write('[{:3.2f}s for json] '.format(b - a))

        save_file = path.join(
            Parameters.SESSION_SAVE_DIRECTORY,
            Parameters.SESSION_SAVE_FILENAME)
        if not path.exists(Parameters.SESSION_SAVE_DIRECTORY):
            makedirs(Parameters.SESSION_SAVE_DIRECTORY)
        a = time.time()
        self.tf_saver.save(self.tf_session, save_file)
        b = time.time()
        sys.stdout.write('[{:3.2f}s for tf] '.format(b - a))

        a = time.time()
        Plotter.save("out")
        b = time.time()
        sys.stdout.write('[{:3.2f}s for Plotter] '.format(b - a))
        a = time.time()
        self.memory.save_memory()
        b = time.time()
        sys.stdout.write('[{:3.2f}s for memory] '.format(b - a))
        post_save_time = time.time()
        sys.stdout.write(
            '[Required {:3.2f}s to save all] '.format(
                post_save_time -
                time_at_start_save))
        self.last_time = post_save_time
        elapsed_time = time.time() - self.initial_time
        remaining_seconds = elapsed_time * \
            (Parameters.MAX_STEPS - self.step) / (self.step - self.initial_step)
        print("eta: {}s".format((timedelta(seconds=remaining_seconds))))
Example #7

def simulation(x, test_range, step_size, file, n=100, runs=1000, dim=2, learn_rate=1):
    '''
    Runs a series of simulations with the perceptron on a number of randomly generated feature vectors.
    Depending on which variable we are controlling for, the simulations fix the values for dimensionality, number of points, and learning rate (c value).
    The variable that we control for (x) is initialized to the low end of the test range and incremented by the step size repeatedly.
    With each increment of the step size, we run the perceptron (with weights/bias always initialized to zero) `runs` times.
    After each single run, we record the results (i.e. the number of perceptron iterations required for convergence) as a row in our dataframe.
    The results are saved to a csv.
    :param x: variable to control for, must be 'n', 'dim', or 'c'
    :param test_range: range of variable to test
    :param step_size: how to increment the variable
    :param file: save destination for csv
    :return: N/A
    '''
    # check for invalid x
    if x not in ['n', 'c', 'dim']:
        raise ValueError('Invalid parameter x')

    (low, high) = test_range
    val = low
    data = []
    plot = Plotter()

    while val < high:
        # Increment independent variable
        if x == 'n':
            n = val
        elif x == 'c':
            learn_rate = val
        elif x == 'dim':
            dim = val
        # Run perceptron 1000 times each on a randomly generated set of feature vectors
        for i in range(runs):
            features = plot.generate_points(n, dim)
            labels = plot.generate_labels_linear(features)
            model = Perceptron(dim, zeros=False)
            iterations = model.train(features,labels, c=learn_rate)
            data.append([n, dim, learn_rate, iterations])
        val += step_size

    # Move data to pandas dataframe and save
    df = pd.DataFrame(data, columns=['n features', 'dimensions', 'c', 'iterations'])
    df.to_csv(file, sep=',', index=False)
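The `Perceptron` class is not included in this excerpt; a minimal sketch of the interface the loop above assumes, with `train()` returning the number of passes until convergence (the {-1, +1} label convention and the meaning of the `zeros` flag are assumptions):

import numpy as np

class Perceptron:
    """Hypothetical implementation matching the calls above."""

    def __init__(self, dim, zeros=True):
        self.w = np.zeros(dim) if zeros else np.random.randn(dim)
        self.b = 0.0

    def train(self, features, labels, c=1, max_iter=10000):
        for iteration in range(1, max_iter + 1):
            errors = 0
            for x, y in zip(features, labels):          # labels in {-1, +1}
                if y * (self.w @ x + self.b) <= 0:      # misclassified point
                    self.w += c * y * np.asarray(x)     # perceptron update rule
                    self.b += c * y
                    errors += 1
            if errors == 0:   # one full clean pass: converged
                return iteration
        return max_iter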
Example #8
async def main():
    transactionCounts99 = {}
    plotter = Plotter()
    infoGetter = InfoGetter(
        "https://*****:*****@nd-806-802-183.p2pify.com"
    )
    latestBlock = infoGetter.getLatestTransactions()
    tasks = []
    async with aiohttp.ClientSession() as session:
        for selectedBlock in range(
                int(latestBlock, 16) - 100, int(latestBlock, 16)):
            task = asyncio.ensure_future(
                infoGetter.getTransactions(session, hex(selectedBlock)))
            tasks.append(task)

        responses = await asyncio.gather(*tasks)
        for response in responses:
            valuesAndKey = next(iter(response.items()))
            transactionCounts99[valuesAndKey[0]] = valuesAndKey[1]
        # all requests have completed, so now we can plot
        plotter.plot(transactionCounts99)
Example #9
    def setup_ui(self):
        self.main_layout.setContentsMargins(0, 0, 0, 0)
        self.move(self.start_pos)
        self.main_layout.addWidget(self.color_list)
        self.setLayout(self.main_layout)
        self.color_list.addItems(Plotter.get_color_maps())

        self.setWindowFlag(QtCore.Qt.FramelessWindowHint, True)
        self.setWindowFlags(QtCore.Qt.Popup)

        self.color_list.itemDoubleClicked.connect(self.send_choice)
        self.show()
Example #10
    def batch_q_learning(self):
        """
        Apply Q-learning updates, or minibatch updates, to samples of experience,
        (s, a, r, s') ~ U(D), drawn at random from the pool of stored samples.
        """

        if self.memory.get_usage() > Parameters.AGENT_HISTORY_LENGTH:

            state_t, action, reward, state_t_plus_1, terminal, i_s_weights, memory_indices = self.memory.bring_back_memories()

            q_t_plus_1 = self.tf_session.run(
                self.target_dqn.q_values, {
                    self.target_dqn_input: state_t_plus_1})
            max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)

            # Q-learning target: r + gamma * max_a' Q_target(s', a'), masked to
            # just r on terminal transitions via the (1 - terminal) factor
            target_q_t = (1. - terminal) * \
                Parameters.DISCOUNT_FACTOR * max_q_t_plus_1 + reward

            _, q_t, losses = self.tf_session.run([self.dqn.optimize, self.dqn.q_values, self.dqn.errors],
                                                 {
                self.dqn.target_q: target_q_t,
                self.dqn.action: action,
                self.dqn_input: state_t,
                self.dqn.i_s_weights: i_s_weights
            })

            self.memory.update(
                memory_indices,
                np.squeeze(q_t),
                losses,
                self.get_learning_completion())
            input_shape = (1, Parameters.IMAGE_HEIGHT, Parameters.IMAGE_WIDTH, Parameters.AGENT_HISTORY_LENGTH)
            dqn_input = self.environment.get_input().reshape(input_shape)
            q_values = self.tf_session.run(
                self.dqn.q_values, {
                    self.dqn_input: dqn_input})
            Plotter.add_q_values_at_t(q_values)
        else:
            print('[WARNING] Not enough memory for a batch')
Example #11

def test_for_error_n(test_range, step_size, file, learn_rate=1, dim=2, runs=100):
    (low, high) = test_range
    n = low
    data = []
    plot = Plotter()
    while n < high:
        for i in range(runs):
            features = plot.generate_points(n, dim)
            labels = plot.generate_labels_linear(features)

            df = pd.DataFrame()  # fresh frame each run: n changes between iterations
            df['features'] = features.tolist()
            df['labels'] = labels.tolist()
            train, testing = train_test_split(df, test_size=.25)

            model = Perceptron(dim, zeros=True)
            model.train(to_array(train['features']), to_array(train['labels']), c=learn_rate)
            error = model.test_error(to_array(testing['features']), to_array(testing['labels']))
            data.append([n, error])
        n += step_size  # advance n; without this the while loop never terminates

    df = pd.DataFrame(data, columns=['n', 'error'])
    df.to_csv(file, sep=',', index=False)
Example #12
    def observe(self):
        y_data_list = []
        addrs = []
        for addr, packet_bin in self.bins.items():  # Python 3: items(), not iteritems()
            if len(packet_bin) > VALID_PACKET_COUNT_THRESHOLD:
                y_data_list.append(packet_bin.generate_y_data(self.observer))
                addrs.append(addr)
        plotter = Plotter(range(self.size), y_data_list)
        plotter.output_file = PLOT_DIR + '_'.join(self.plot_name.split()) + '.pdf'
        plotter.x_label = 'Packet Sequence Number'
        plotter.y_label = addrs
        plotter.plot()
Example #13
    def __init__(self, parent, controller, manager):
        self.parent = parent
        super().__init__(self.parent)

        self.manager = manager

        self.plt = Plotter(self,
                           parent.parent.controller.force_sensor.getreading,
                           controller.experiment.exp_str)

        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.plt, 0, wx.ALL | wx.EXPAND, 0)
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.Bind(wx.EVT_TIMER, self.plt.update)

        self.SetSizerAndFit(self.sizer)
Example #14
def train(config, dataset, model):
    # Data loaders
    train_loader, val_loader = dataset.train_loader, dataset.val_loader

    if 'use_weighted' not in config:
        # TODO (part c): define loss function
        criterion = None
    else:
        # TODO (part e): define weighted loss function
        criterion = None
    # TODO (part c): define optimizer
    learning_rate = config['learning_rate']
    optimizer = None

    # Attempts to restore the latest checkpoint if exists
    print('Loading model...')
    force = config.get('ckpt_force', False)
    model, start_epoch, stats = checkpoint.restore_checkpoint(
        model, config['ckpt_path'], force=force)

    # Create plotter
    plot_name = config.get('plot_name', 'CNN')
    plotter = Plotter(stats, plot_name)

    # Evaluate the model
    _evaluate_epoch(plotter, train_loader, val_loader, model, criterion,
                    start_epoch)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config['num_epoch']):
        # Train model on training set
        _train_epoch(train_loader, model, criterion, optimizer)

        # Evaluate model on training and validation set
        _evaluate_epoch(plotter, train_loader, val_loader, model, criterion,
                        epoch + 1)

        # Save model parameters
        checkpoint.save_checkpoint(model, epoch + 1, config['ckpt_path'],
                                   plotter.stats)

    print('Finished Training')

    # Save figure and keep plot open
    plotter.save_cnn_training_plot()
    plotter.hold_training_plot()
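The TODO placeholders above come from a course skeleton and leave `criterion` and `optimizer` as `None`. A plausible completion for a classification CNN, assuming `config` supplies per-class weights under a hypothetical 'class_weights' key (this is a sketch, not the course's reference solution):

import torch
import torch.nn as nn

if 'use_weighted' not in config:
    criterion = nn.CrossEntropyLoss()  # plain multi-class loss (part c)
else:
    # weighted loss (part e); 'class_weights' is a hypothetical config key
    weights = torch.tensor(config['class_weights'], dtype=torch.float32)
    criterion = nn.CrossEntropyLoss(weight=weights)

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)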
Example #15
DISPLAYSURF = pygame.display.set_mode(screenSize)
pygame.display.set_caption('Mood Color')
pygame.font.init()

#fontObj = pygame.font.Font('freesansbold.ttf',32)

fontObj = pygame.font.Font("RopaSans-Regular.ttf", 22)
fontObj.set_bold(True)  # pygame.font.Font has no `bold` argument; only SysFont does
fontObj2 = pygame.font.Font("RopaSans-Regular.ttf", 36)
fontObj2.set_bold(True)
textSurfaceObj2 = fontObj2.render('Welcome to Mood Color', True, (255, 102, 0))
textRectObj2 = textSurfaceObj2.get_rect()
textRectObj2.center = (swidth / 2, 40)

indicoStuff = Analysis()
pp = pprint.PrettyPrinter(indent=4)

pt = Plotter(500, 450, 480, 250, DISPLAYSURF, 'Sentiment Analysis')

x = [1,2,3,4,5,6,7,8,9]
y = [2,4,2,1,5,7,8,2,0]

x2 = [3,6,9]
y2 = [1,4,8]

#pt.getData(x, y)
#pt.getData2(x2,y2)

counter = 0
counter2 = 0

t1 = time.time()
t2 = time.time()
Example #16
def main(eval_mode: bool, feature_type: str, scene: str, hyper_params: dict,
         network_config: dict, eval_settings: dict, fft_params: dict) -> None:
    """
    Main function that takes hyper-parameters, creates the architecture, trains the model and evaluates it
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    os.makedirs('results', exist_ok=True)
    experiment_id = datetime.now().strftime(
        "%Y%m%d-%H%M%S") + f' - {feature_type} - {scene}'
    writer = SummaryWriter(log_dir=os.path.join('tensorboard', experiment_id))
    shutil.copyfile('config.json', os.path.join(
        'results', 'config.json'))  # save current config file to results
    training_dataset = BaseDataset(feature_type, scene, hyper_params,
                                   fft_params)
    # get scene classes and create the plotter
    classes = util.get_scene_classes(scene)
    plotter = Plotter(classes,
                      hop_size=fft_params['hop_size'],
                      sampling_rate=22050)
    # finalize network config parameters
    network_config['out_features'] = len(classes)
    if feature_type == 'spec':
        network_config['n_features'] = fft_params['n_fft'] // 2 + 1
    elif feature_type == 'mfcc':
        network_config['n_features'] = fft_params['n_mfcc']
    elif feature_type == 'mels':
        network_config['n_features'] = fft_params['n_mels']
    # create network
    net = SimpleCNN(**network_config)
    # Save initial model as "best" model (will be overwritten later)
    model_path = os.path.join('results',
                              f'best_{feature_type}_{scene}_model.pt')
    if not os.path.exists(model_path):
        torch.save(net, model_path)
    else:  # if there already exists a model, just load parameters
        print(f'reusing pre-trained model: "{model_path}"')
        net = torch.load(model_path, map_location=torch.device('cpu'))
    net.to(device)
    # get loss function
    loss_fn = torch.nn.BCELoss()
    # create adam optimizer
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=hyper_params['learning_rate'],
                                 weight_decay=hyper_params['weight_decay'])

    train_stats_at = eval_settings['train_stats_at']
    validate_at = eval_settings['validate_at']
    best_loss = np.inf  # best validation loss so far
    progress_bar = tqdm.tqdm(total=hyper_params['n_updates'],
                             desc=f"loss: {np.nan:7.5f}",
                             position=0)
    update = 0  # current update counter

    fold_idx = 1  # one random fold (defines split into training and validation set)
    rnd_augment = hyper_params['rnd_augment']
    # create subsets and data loaders
    if eval_mode:
        train_subset = training_dataset
        val_loader = None
    else:
        train_subset = Subset(training_dataset,
                              training_dataset.get_fold_indices(fold_idx)[0])
        val_subset = Subset(training_dataset,
                            training_dataset.get_fold_indices(fold_idx)[1])
        val_set = ExcerptDataset(val_subset,
                                 feature_type,
                                 classes,
                                 hyper_params['excerpt_size'],
                                 fft_params,
                                 overlap_factor=1,
                                 rnd_augment=False)
        val_loader = DataLoader(val_set,
                                batch_size=hyper_params['batch_size'],
                                shuffle=False,
                                num_workers=0)

    train_set = ExcerptDataset(
        train_subset,
        feature_type,
        classes,
        hyper_params['excerpt_size'],
        fft_params,
        overlap_factor=hyper_params['train_overlap_factor'],
        rnd_augment=rnd_augment)
    train_loader = DataLoader(train_set,
                              batch_size=hyper_params['batch_size'],
                              shuffle=True,
                              num_workers=0)

    n_updates = hyper_params['n_updates']
    # main training loop
    while update <= n_updates:
        if rnd_augment and update > 0:
            # regenerate new excerpts (in background) but use current ones for training
            train_set.generate_excerpts()
        for data in train_loader:
            inputs, targets, audio_file, idx = data
            inputs = inputs.to(device, dtype=torch.float32)
            targets = targets.to(device, dtype=torch.float32)
            optimizer.zero_grad()
            predictions = net(inputs)
            loss = loss_fn(predictions, targets)
            loss.backward()
            optimizer.step()

            if update % train_stats_at == 0 and update > 0:
                # log training loss
                writer.add_scalar(tag="training/loss",
                                  scalar_value=loss.cpu(),
                                  global_step=update)

            if not eval_mode and update % validate_at == 0 and update > 0:
                # evaluate model on validation set, log parameters and metrics
                val_loss, metrics, metrics_pp = validate_model(
                    net, val_loader, classes, update, device, plotter)
                print(f'val_loss: {val_loss}')
                f_score = metrics['segment_based']['overall']['F']
                err_rate = metrics['segment_based']['overall']['ER']
                f_score_pp = metrics_pp['segment_based']['overall']['F']
                err_rate_pp = metrics_pp['segment_based']['overall']['ER']
                print(f'f_score: {f_score}')
                print(f'err_rate: {err_rate}')
                print(f'f_score_pp: {f_score_pp}')
                print(f'err_rate_pp: {err_rate_pp}')
                params = net.parameters()
                log_validation_params(writer, val_loss, params, metrics,
                                      metrics_pp, update)
                # Save best model for early stopping
                if val_loss < best_loss:
                    print(
                        f'{val_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = val_loss
                    torch.save(net, model_path)

            if eval_mode:
                # in eval mode, just compare train_loss
                train_loss = loss.cpu()
                if train_loss < best_loss:
                    print(
                        f'{train_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = train_loss
                    torch.save(net, model_path)

            # update progress and update-counter
            progress_bar.set_description(f"loss: {loss:7.5f}", refresh=True)
            progress_bar.update()
            update += 1
            if update >= n_updates:
                break

    progress_bar.close()
    print('finished training.')

    print('starting evaluation...')
    evaluator = evaluation.Evaluator(feature_type, scene, hyper_params,
                                     network_config, fft_params, model_path,
                                     device, writer, plotter)
    evaluator.evaluate()
    print('zipping "results" folder...')
    util.zip_folder('results', f'results_{feature_type}_{scene}')
Example #17

        popt, pcov = fittingclass.fitData(x, yn, funname)
        r_squared = statcalc.computeRSquare(x, yn, popt,
                                            fittingclass.getFun(funname))

        #print(y, *popt, " R2: ", r_squared)
        _results_ = y + " " + str([round(p, 5)
                                   for p in popt]) + " R2: " + str(r_squared)
        _reportlog_.append(LOGROW.format(dt=str(datetime.now()), tx=_results_))
        data_prediction = prediction.predict(x[len(x) - 1],
                                             fittingclass.getFun(funname),
                                             popt)
        for d in data_prediction:
            _predictionlog_.append(
                PREDICTIONLOG.format(subj=y,
                                     pd=d,
                                     val=str(round(data_prediction[d], 5))))

        if config['APP']['showplot'] == "1":
            _funlabel_ = fittingclass.getFunRepr(funname, popt)
            _funlabel_ += "  R2: " + str(round(r_squared, 3))
            Plotter.plot_covid_data(x, yn, y, fittingclass.getFun(funname),
                                    popt, _funlabel_)

    _reportlog_.append(
        LOGROW.format(dt=str(datetime.now()), tx="End Process " + _idelab_))

    with open(path.join(_reportdir_, filereportname), 'w') as fw:
        fw.write("\n".join(_reportlog_))
    with open(path.join(_reportdir_, predictionfilename), 'w') as fw:
        fw.write("\n".join(_predictionlog_))
Example #18
Example: 
./app.py 'data/Africa_SRF.1970*.nc' t2m 'obs/CRUTMP.CDF' TMP
"""

if __name__ == "__main__":
    if len(sys.argv) != 5:
        print(usage)
        sys.exit(1)
    pattern = sys.argv[1]
    nc_var = sys.argv[2]
    obs_pattern = sys.argv[3]
    obs_nc_var = sys.argv[4]

    r = RegCMReader(pattern)
    value = r.get_value(nc_var).mean()
    time_limits = value.get_limits('time')
    crd_limits = value.get_latlonlimits()

    obs_r = CRUReader(obs_pattern)
    obs_value = obs_r.get_value(obs_nc_var, imposed_limits={'time': time_limits}, latlon_limits=crd_limits).mean()
    if obs_nc_var == "TMP":
        obs_value.to_K()

    value.regrid(obs_value.latlon)
    diff = obs_value - value
    plt = Plotter(diff)
    plt.plot(levels = (-5, 5))
    plt.show()
    plt.save('image', format='png')
    plt.close()
Example #19
def plot_figures():
    Plotter.load(OUT_FOLDER)
    Plotter.save_plots(OUT_FOLDER)
Example #20
    group.add_argument(
        '--random',
        action='store_true',
        help='Play 500 games with random action selection and print the mean/std')
    group.add_argument('--play',
                       action='store_true',
                       help='Play the game with pre-trained model')

    args = parser.parse_args()
    assert exists(args.parameters_json)

    if args.train:
        train(args.parameters_json)
    elif args.plot:
        plot_figures()
    elif args.plot_tsne:
        plot_tsne(args.parameters_json)
    elif args.plot_layers:
        plot_conv_layers(args.parameters_json)
    elif args.reset_plot:
        Plotter.reset(OUT_FOLDER)
        Memory.reset(args.parameters_json)
        print(
            "Training results deleted from folder %s and Memory was removed from root."
            % OUT_FOLDER)
    elif args.random:
        play_random()
    elif args.play:
        play_pre_trained(args.parameters_json)
Example #21
def main():
    print("Loading wordvecs...")
    if utils.exists("glove", "glove.840B.300d.txt", "gutenberg"):
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt", "gutenberg")
    else:
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt", "gutenberg",
                                           set(map(clean_word, gutenberg.words())))

    wordvecs_norm = wordvecs / np.linalg.norm(wordvecs, axis=1).reshape(-1, 1)

    print("Loading corpus...")
    # Convert corpus into normed wordvecs, replacing any words not in vocab with zero vector
    sentences = [[wordvecs_norm[words[clean_word(word)]] if clean_word(word) in words.keys() else np.zeros(WORD_DIM)
                  for word in sentence]
                 for sentence in gutenberg.sents()]

    print("Processing corpus...")
    # Pad sentences shorter than SEQUENCE_LENGTH with zero vectors and truncate sentences longer than SEQUENCE_LENGTH
    s_train = list(map(pad_or_truncate, sentences))

    np.random.shuffle(s_train)

    # Truncate to multiple of BATCH_SIZE
    s_train = s_train[:int(len(s_train) / BATCH_SIZE) * BATCH_SIZE]

    s_train_idxs = np.arange(len(s_train))

    print("Generating graph...")
    network = NlpGan(learning_rate=LEARNING_RATE, d_dim_state=D_DIM_STATE, g_dim_state=G_DIM_STATE,
                     dim_in=WORD_DIM, sequence_length=SEQUENCE_LENGTH)

    plotter = Plotter([2, 1], "Loss", "Accuracy")
    plotter.plot(0, 0, 0, 0)
    plotter.plot(0, 0, 0, 1)
    plotter.plot(0, 0, 1, 0)
    plotter.plot(0, 1, 1, 0)

    #d_vars = [var for var in tf.trainable_variables() if 'discriminator' in var.name]
    saver = tf.train.Saver()

    with tf.Session() as sess:
        #eval(sess, network, words, wordvecs_norm, saver)

        sess.run(tf.global_variables_initializer())
        #resume(sess, saver, plotter, "GAN_9_SEQUENCELENGTH_10", 59)

        d_loss, g_loss = 0.0, 0.0
        for epoch in range(0, 10000000):
            print("Epoch %d" % epoch)

            np.random.shuffle(s_train_idxs)
            for batch in range(int(len(s_train_idxs) / BATCH_SIZE)):
                # select next random batch of sentences
                s_batch_real = [s_train[x] for x in s_train_idxs[batch * BATCH_SIZE:(batch + 1) * BATCH_SIZE]]  # shape (BATCH_SIZE, SEQUENCE_LENGTH, WORD_DIM)

                # reshape to (SEQUENCE_LENGTH, BATCH_SIZE, WORD_DIM) while preserving sentence order
                s_batch_real = np.array(s_batch_real).swapaxes(0, 1)

                if d_loss - g_loss > MAX_LOSS_DIFF and False:  # branch intentionally disabled via `and False`
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB)
                    )
                elif g_loss - d_loss > MAX_LOSS_DIFF and False:  # branch intentionally disabled via `and False`
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'g_loss', 'g_train'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB)
                    )
                else:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss', 'g_train'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB,
                                              instance_variance=INSTANCE_VARIANCE)
                    )

                d_loss, g_loss = output_dict['d_loss'], output_dict['g_loss']

                if batch % 10 == 0:
                    print("Finished training batch %d / %d" % (batch, int(len(s_train) / BATCH_SIZE)))
                    print("Discriminator Loss: %f" % output_dict['d_loss'])
                    print("Generator Loss: %f" % output_dict['g_loss'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), d_loss, 0, 0)
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), g_loss, 0, 1)

                if batch % 100 == 0:
                    eval_out = sess.run(
                        network.get_fetch_dict('g_outputs', 'd_accuracy'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=1.0,
                                              instance_variance=INSTANCE_VARIANCE)
                    )
                    # reshape g_outputs to (BATCH_SIZE, SEQUENCE_LENGTH, WORD_DIM) while preserving sentence order
                    generated = eval_out['g_outputs'].swapaxes(0, 1)
                    for sentence in generated[:3]:
                        for wordvec in sentence:
                            norm = np.linalg.norm(wordvec)
                            word, similarity = nearest_neighbor(words, wordvecs_norm, wordvec / norm)
                            print("{}({:4.2f})".format(word, similarity))
                        print('\n---------')
                    print("Total Accuracy: %f" % eval_out['d_accuracy'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), eval_out['d_accuracy'], 1, 0)

            saver.save(sess, './checkpoints/{}.ckpt'.format(SAVE_NAME),
                       global_step=epoch)
            plotter.save(SAVE_NAME)
Example #22
    def processMeasure(self, measure):
        # Measure format is '[HR] [BC]' where HR is heart rate and BC is beat count
        heartRate = int(measure.split()[0])
        beatCount = int(measure.split()[1])
        print("HR: {0} BC: {1}".format(heartRate, beatCount))
        self.heartRate = heartRate
        
    def processProblem(self, problem):
        print("Problem:", problem)

if __name__ == '__main__':
    handler = HRMHandler()
    server = sensorserver.SensorTCPServer()

    print("Starting server and waiting for connection")
    server.startServer(('', 4004), handler)
    server.waitForConnection()
    
    print("Starting loop")
    server_thread = threading.Thread(target=server.loop)
    server_thread.daemon = True
    server_thread.start()
    
    plotter = Plotter(handler)
    plotter.animate(20)
    
    plotter.getPlot().show()
    # Execution continues after user closes the window
        
    print("Shutting down")
    server.shutdown()
Example #23
def plot_calendar(
    *,
    zip_path,
    year,
    plot_size=1,
    n_cols=4,
    month_gap=0,
    col_gap=0.5,
    sport="running",
    label=None,
):
    """Plot a year of Strava data in a calendar layout.

    Parameters
    ----------
    zip_path : str
        Path to .zip archive from Strava
    year : int
        Year of data to use. We have to unzip and read each file in the archive
        to figure out what year it is from, and this takes around 5 minutes for
        a new year of data.
    plot_size : float (default=1)
        The size of the plot is dynamically chosen for the layout, but you can make
        it bigger or smaller by making this number bigger or smaller.
    n_cols : int (default=4)
        Number of columns to divide the days into. Splits evenly on months, so
        this number should evenly divide 12.
    month_gap : float (default=0)
        Vertical space between two months. Each calendar square is 1 x 1, so
        a value of 1.5 here would move the months 1.5 calendar squares apart.
    col_gap : float (default=0.5)
        Horizontal space between columns. A calendar square is 1 x 1, so a
        value of 0.5 here puts columns half a square apart.
    sport : str (default="running")
        Sport to plot routes for. I have not tested this with anything except
        running, but maybe you get lucky!
    label : str or None
        Label in the top left corner of the plots. Defaults to the year. Use ""
        to not have any label.

    Returns
    -------
    figure, axis
        The matplotlib figure and axis with the plot. These can be used
        for further customization.
    """
    data = get_data(
        zip_path,
        sport,
        datetime.datetime(year, 1, 1),
        datetime.datetime(year + 1, 1, 1),
    )

    plotter = Plotter(data)

    fig, ax = plt.subplots(figsize=(plot_size * 5 * n_cols,
                                    plot_size * 40 / n_cols))
    fig, ax = plotter.plot_year(year=year,
                                fig=fig,
                                ax=ax,
                                n_cols=n_cols,
                                month_gap=month_gap,
                                col_gap=col_gap)
    if label is None:
        label = str(year)
    ax.text(0,
            -1,
            label,
            fontdict={
                "fontsize": 32,
                "fontweight": "heavy"
            },
            alpha=0.5)
    return fig, ax
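A hypothetical invocation (the archive name is made up); the returned figure and axis can be customized further before saving:

fig, ax = plot_calendar(
    zip_path="strava_export.zip",  # hypothetical Strava export archive
    year=2020,
    n_cols=4,                      # must evenly divide 12
    month_gap=1.0,
    sport="running",
)
fig.savefig("calendar_2020.png", dpi=150, bbox_inches="tight")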
Example #24
File: s.py Project: pinakm9/fem

bc = DirichletBC(FS, u_D, boundary)

# Define variational problem
F_1 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + kappa*grad(T)[0]*f_1*dx + \
    A_1hat*((T - T_n) / k)*f_2*dx + a_T*grad(T)[0]*v*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx


F_2 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + beta*v*grad(v)[0]*f_1*dx + kappa*grad(T)[0]*f_1*dx + \
    A_1hat*((T - T_n) / k)*f_2*dx + a_T*v*grad(T)[0]*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx

# Create VTK files for visualization output
"""vtkfile_v = File('qtcm1/velocity.pvd')
vtkfile_T = File('qtcm1/temperature.pvd')"""
pltr = Plotter(mesh, id_='4')
# Solve the system for each time step
t = 0
v_ = lambda y: v_n([y])
T_ = lambda y: T_n([y])

for n in range(num_steps):
    #pltr.plot(v_,'qtcm1/velocity/', n, t, quantity = 'velocity_42')
    pltr.plot(T_, 'qtcm1/velocity/', n, t, quantity='temp_43')
    t += dt
    # Solve variational problem for time step
    J = derivative(F_1, u)
    solve(F_1 == 0, u, bc, J=J)
    # Save solution to file (VTK)
    """_v, _T = u.split()
	vtkfile_v << (_v, t)
Example #25
    beg_yr = int(sys.argv[2])
    end_yr = int(sys.argv[3])
    nc_var = sys.argv[4]
    obs_pattern = sys.argv[5]
    obs_nc_var = sys.argv[6]

    for yr in range(beg_yr, end_yr):  # Python 3: range, not xrange
        pattern = os.path.join(nc_dir, '*' + str(yr) + '*.nc')
        #        for
        r = RegCMReader(pattern)
    value = r.get_value(nc_var).mean()
    time_limits = value.get_limits('time')
    crd_limits = value.get_latlonlimits()

    obs_r = CRUReader(obs_pattern)
    obs_value = obs_r.get_value(obs_nc_var,
                                imposed_limits={
                                    'time': time_limits
                                },
                                latlon_limits=crd_limits).mean()
    if obs_nc_var == "TMP":
        obs_value.to_K()

    value.regrid(obs_value.latlon)
    diff = obs_value - value
    plt = Plotter(diff)
    plt.plot(levels=(-5, 5))
    plt.show()
    plt.save('image', format='png')
    plt.close()
Example #26
from plot import Plotter
from data import Data
from singleLayerNN import SingleLayerNeuralNetwork
from multiLayerNN import MultiLayerNeuralNetwork
data = Data('./dataset.csv')
plotter = Plotter(data)
slnn = SingleLayerNeuralNetwork(data, 0.01, 1000)
weightsSLNN, precisionSLNN = slnn.run()
mlnn = MultiLayerNeuralNetwork(data, 0.1, 10000)
weightsMLNN, precisionMLNN = mlnn.run()
print("\nSingle Layer Neural Net Precision:\t", precisionSLNN, "%")
print("Multi Layer Neural Net Precision: \t", precisionMLNN, "%")
plotter.plot(weightsSLNN, weightsMLNN)



Example #27
	soft_update(value_net1, target_value_net1, soft_tau=1.0)
	soft_update(value_net2, target_value_net2, soft_tau=1.0)
	soft_update(perturbator_net, target_perturbator_net, soft_tau=1.0)

	# optim.Adam can be replaced with RAdam
	value_optimizer1 = optimizer.Ranger(value_net1.parameters(), lr=params['value_lr'], k=10)
	value_optimizer2 = optimizer.Ranger(value_net2.parameters(), lr=params['perturbator_lr'], k=10)
	perturbator_optimizer = optimizer.Ranger(perturbator_net.parameters(), lr=params['value_lr'], weight_decay=1e-3, k=10)
	generator_optimizer = optimizer.Ranger(generator_net.parameters(), lr=params['generator_lr'], k=10)
	
	loss = {
		'train': {'value': [], 'perturbator': [], 'generator': [], 'step': []},
		'test': {'value': [], 'perturbator': [], 'generator': [], 'step': []},
		}
	
	plotter = Plotter(loss, [['generator'], ['value', 'perturbator']])


	for epoch in range(n_epochs):
		print("Epoch: {}".format(epoch+1))
		for batch in env.train_dataloader:
			loss = bcq_update(batch, params, writer, debug, step=step)
			plotter.log_losses(loss)
			step += 1
			print("Loss:{}".format(loss))
			if step % plot_every == 0:
				print('step', step)
				test_loss = run_tests(env,params,writer,debug)
				print(test_loss)
				plotter.log_losses(test_loss, test=True)
				plotter.plot_loss()
Example #28
class MainWindow(QtWidgets.QMainWindow):
    stop_plot_signal = QtCore.pyqtSignal()

    def __init__(self, settings):
        super(MainWindow, self).__init__()
        self.central_widget = QtWidgets.QWidget(self)
        self.main_layout = QtWidgets.QVBoxLayout(self.central_widget)
        self.image_label = QtWidgets.QLabel(self.central_widget)
        self.status_bar = QtWidgets.QStatusBar(self)
        self.image_slider = QtWidgets.QSlider(self.central_widget,
                                              orientation=QtCore.Qt.Horizontal)
        self.bottom_layout = QtWidgets.QHBoxLayout()

        self.move_year_left_button = QtWidgets.QPushButton()
        self.move_year_right_button = QtWidgets.QPushButton()

        self.button_layout = QtWidgets.QVBoxLayout()

        self.plot_button = QtWidgets.QPushButton()
        self.stop_button = QtWidgets.QPushButton(enabled=False)

        self.sdate_animate_layout = QtWidgets.QVBoxLayout()
        self.start_date_layout = QtWidgets.QHBoxLayout()

        self.start_year = QtWidgets.QSpinBox()
        self.start_month = QtWidgets.QComboBox()
        self.animate_button = QtWidgets.QPushButton(enabled=False)

        self.edate_pref_layout = QtWidgets.QVBoxLayout()
        self.end_date_layout = QtWidgets.QHBoxLayout()

        self.end_year = QtWidgets.QSpinBox()
        self.end_month = QtWidgets.QComboBox()
        self.preferences_button = QtWidgets.QPushButton()

        self.animate_timer = QtCore.QTimer()
        self.image_count = 0
        self.settings = settings

    def setup_ui(self):
        self.save_default_settings()
        self.setWindowFlag(QtCore.Qt.MSWindowsFixedSizeDialogHint)
        self.setCentralWidget(self.central_widget)

        self.setStatusBar(self.status_bar)
        self.status_bar.setSizeGripEnabled(False)

        self.retranslate_ui()
        self.set_ranges_values()
        self.connect_signals()
        self.set_shortcuts()

        spacer = QtWidgets.QSpacerItem(1, 1, QtWidgets.QSizePolicy.Expanding,
                                       QtWidgets.QSizePolicy.Expanding)

        self.start_date_layout.addWidget(self.start_month)
        self.start_date_layout.addWidget(self.start_year)

        self.sdate_animate_layout.addLayout(self.start_date_layout)
        self.sdate_animate_layout.addWidget(self.animate_button)

        self.button_layout.addWidget(self.plot_button, alignment=QtCore.Qt.AlignCenter)
        self.button_layout.addWidget(self.stop_button, alignment=QtCore.Qt.AlignCenter)

        self.end_date_layout.addWidget(self.end_month)
        self.end_date_layout.addWidget(self.end_year)

        self.edate_pref_layout.addLayout(self.end_date_layout)
        self.edate_pref_layout.addWidget(self.preferences_button)

        self.bottom_layout.addLayout(self.sdate_animate_layout)
        self.bottom_layout.addSpacerItem(spacer)
        self.bottom_layout.addWidget(self.move_year_left_button)
        self.bottom_layout.addLayout(self.button_layout)
        self.bottom_layout.addWidget(self.move_year_right_button)
        self.bottom_layout.addSpacerItem(spacer)
        self.bottom_layout.addLayout(self.edate_pref_layout)

        self.main_layout.addWidget(self.image_label, alignment=QtCore.Qt.AlignCenter)
        self.main_layout.addSpacerItem(spacer)
        self.main_layout.addWidget(self.image_slider)
        self.main_layout.addLayout(self.bottom_layout)

        self.show()
        self.preferences_button.pressed.connect(self.show_options)
        self.set_sizes()

    def set_shortcuts(self):
        year_move_right = QtWidgets.QShortcut(
            QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_Right), self)

        year_move_right.activated.connect(lambda: self.move_slider(self.year_step))

        year_move_left = QtWidgets.QShortcut(
            QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_Left), self)

        year_move_left.activated.connect(lambda: self.move_slider(-self.year_step))

        month_move_right = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Right), self)
        month_move_right.activated.connect(lambda: self.move_slider(1))

        month_move_left = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Left), self)
        month_move_left.activated.connect(lambda: self.move_slider(-1))

    def set_sizes(self):
        self.setFixedSize(850, 650)
        self.image_label.setFixedSize(QtCore.QSize(796, 552))

        # set year skip buttons to be square and 5 pixels wider than text in them
        font = QtGui.QFont()
        self.move_year_left_button.setFixedWidth(
            QtGui.QFontMetrics(font).boundingRect(self.move_year_left_button.text()).width() + 5)
        self.move_year_right_button.setFixedWidth(
            QtGui.QFontMetrics(font).boundingRect(self.move_year_right_button.text()).width() + 5)

        self.move_year_left_button.setFixedHeight(self.move_year_left_button.width())
        self.move_year_right_button.setFixedHeight(self.move_year_right_button.width())

    def set_ranges_values(self):
        months = ("January", "February", "March", "April", "May", "June", "July",
                  "August", "September", "October", "November", "December")

        self.start_month.addItems(months)
        self.end_month.addItems(months)

        self.image_slider.setRange(0, 0)
        self.image_slider.setValue(0)

        self.start_year.setRange(1850, 2010)
        self.end_year.setRange(1980, 2019)
        self.start_year.setValue(1980)
        self.end_year.setValue(2010)

    def connect_signals(self):
        self.image_slider.valueChanged.connect(self.change_image)

        # ensure only valid dates can be entered
        self.start_month.currentIndexChanged.connect(self.date_changed)
        self.end_year.valueChanged.connect(self.date_changed)
        self.start_year.valueChanged.connect(self.date_changed)

        self.animate_button.pressed.connect(self.animate)
        self.plot_button.pressed.connect(self.plot)
        self.stop_button.pressed.connect(self.quit_current_tasks)

        self.move_year_left_button.pressed.connect(lambda: self.move_slider(-self.year_step))
        self.move_year_right_button.pressed.connect(lambda: self.move_slider(self.year_step))

    def retranslate_ui(self):
        self.setWindowTitle("World Temperature Anomaly Map")
        self.plot_button.setText("Plot")
        self.stop_button.setText("Stop")
        self.animate_button.setText("Play")
        self.preferences_button.setText("Preferences")
        self.move_year_right_button.setText("-->")
        self.move_year_left_button.setText("<--")
        self.move_year_left_button.setToolTip("Skip year")
        self.move_year_right_button.setToolTip("Skip year")

    def show_options(self):
        self.preferences_button.setEnabled(False)
        w = SettingsPop(self.settings, self)
        w.setup_ui()
        w.settings_signal.connect(self.refresh_settings)
        w.close_signal.connect(lambda: self.preferences_button.setEnabled(True))

    def save_default_settings(self):
        if not self.settings.value("Plot step"):
            self.settings.setValue("Playback FPS", 5)
            self.settings.setValue("Plot step", 1)
            self.settings.setValue("Color map", "seismic")
            self.settings.sync()

        self.plot_step = self.settings.value("Plot step", type=int)
        self.play_fps = self.settings.value("Playback FPS", type=int)
        self.color_map = self.settings.value("Color map")
        self.year_step = max(int(12 / self.plot_step), 1)

    def refresh_settings(self, values):
        self.play_fps = int(values[0])
        self.plot_step = int(values[1])
        self.color_map = values[2]
        self.year_step = max(int(12 / self.plot_step), 1)

    def set_status(self, message):
        self.status_bar.showMessage(message)

    def plot(self):
        self.image_count = 0
        QtGui.QPixmapCache.clear()  # clear qt image cache
        self.stop_button.setEnabled(True)
        self.plot_button.setEnabled(False)
        self.animate_button.setEnabled(False)

        # send dates in decimal format to worker
        start_date = self.start_year.value() + (1 + self.start_month.currentIndex() * 2) / 24
        end_date = self.end_year.value() + (1 + self.end_month.currentIndex() * 2) / 24

        self.worker = Plotter(start_date, end_date, self.plot_step, self.color_map, self)
        self.worker.image_increment_signal.connect(self.add_image)
        self.worker.finished.connect(self.del_worker)
        self.worker.status_signal.connect(self.set_status)

        self.worker.start()

    def del_worker(self):
        self.worker.quit()
        self.stop_button.setEnabled(False)
        self.plot_button.setEnabled(True)
        self.animate_button.setEnabled(True)

    def add_image(self):
        self.image_slider.setMaximum(self.image_count)
        # move slider to max value if it was at max before
        if self.image_slider.value() == self.image_slider.maximum() - 1:
            self.move_slider(1)
        self.image_count += 1

    def move_slider(self, amount: int):
        """ move image_slider by value"""
        self.image_slider.setValue(self.image_slider.value() + amount)

    def change_image(self, index):
        pixmap = QtGui.QPixmap(f"{Plotter.PLOTS_DIR}plot{index}")
        self.image_label.setPixmap(pixmap)

    def date_changed(self):
        """Ensure only valid dates can be entered
            if start and end years match, block out months above
            chosen start month in end months

            if year is 2019 allow only January-May range"""

        for item_index in range(0, 12):
            self.start_month.model().item(item_index).setEnabled(True)
            self.end_month.model().item(item_index).setEnabled(True)

        if self.start_year.value() == self.end_year.value():
            for item_index in range(0, self.start_month.currentIndex()):
                self.end_month.model().item(item_index).setEnabled(False)
            if self.end_month.currentIndex() < self.start_month.currentIndex():
                self.end_month.setCurrentIndex(self.start_month.currentIndex())

        if self.start_year.value() == 2019:
            for item_index in range(5, 12):
                self.start_month.model().item(item_index).setEnabled(False)
            if self.start_month.currentIndex() > 4:
                self.start_month.setCurrentIndex(4)

        if self.end_year.value() == 2019:
            for item_index in range(5, 12):
                self.end_month.model().item(item_index).setEnabled(False)
            if self.end_month.currentIndex() > 4:
                self.end_month.setCurrentIndex(4)

        self.start_year.setRange(1850, self.end_year.value())
        self.end_year.setRange(self.start_year.value(), 2019)

    def animate(self):
        self.image_slider.setValue(1)
        self.stop_button.setEnabled(True)
        self.animate_button.setEnabled(False)
        self.animate_timer.timeout.connect(self.animation)
        self.animate_timer.start(int(1000 / self.play_fps))

    def animation(self):
        self.move_slider(1)
        if self.image_slider.value() == self.image_slider.maximum():
            self.stop_animation()
            self.stop_button.setEnabled(False)

    def stop_animation(self):
        self.animate_timer.stop()
        try:
            self.animate_timer.timeout.disconnect()
        except TypeError:
            pass
        self.animate_button.setEnabled(True)

    def quit_current_tasks(self):
        self.stop_plot_signal.emit()
        self.stop_animation()
        self.stop_button.setEnabled(False)

    def closeEvent(self, *args, **kwargs):
        super(MainWindow, self).closeEvent(*args, **kwargs)
        try:
            self.worker.clear_plots()
        except AttributeError:
            pass
Example #29
def train(params_path):
    Plotter.load(OUT_FOLDER)
    Parameters.load(params_path)
    environment = Environment()
    agent = Agent(environment)
    agent.train()
Example #30

def calculate_emissions():
    
    # In this example we will calculate annual CO emissions for the 14 GFED
    # basisregions over 1997-2014. Please adjust the code to calculate emissions
    # for your own species, region, and time period of interest. Please
    # first download the GFED4.1s files and the GFED4_Emission_Factors.txt
    # to your computer and adjust the directory where you placed them below.
    directory = '.'


    """
    Read in emission factors
    """
    species = [] # names of the different gas and aerosol species
    EFs     = np.zeros((41, 6)) # 41 species, 6 sources

    k = 0
    f = open(directory + '/GFED4_Emission_Factors.txt')
    while True:
        line = f.readline()
        if line == "":
            break

        if line[0] != '#':
            contents = line.split()
            species.append(contents[0])
            EFs[k, :] = contents[1:]
            k += 1

    f.close()

    plotter = Plotter()
    # totals for three regionally organized tables
    regional_tables = np.zeros((20, 3, 7, 15))
    # totals for three species-organized tables
    species_tables = np.zeros((20, 3, 7, 9))
    for species_num in range(9):
        print " "
        print "Species: " + species_used[species_num]
        EF_species = EFs[species_row[species_num]];
        writers = [];
        for writer_type in range(3):
            writers.append(setup_writer(data_types[writer_type], species_used[species_num], units[writer_type]));
        # calculate and write emissions for this species for each year 1997-2014
        for year in range(start_year, end_year + 3):
            year_to_use = year
            start_month = 0
            identifier = species_used[species_num] + "_" + str(year)
            # do El Nino and La Nina years separately -- calculate and write
            # emissions for July 1 to June 30
            if year == end_year + 1:
                year_to_use = 1997
                start_month = 7
                identifier = species_used[species_num] + "_1997-1998 El Nino"
            if year == end_year + 2:
                year_to_use = 1998
                start_month = 7
                identifier = species_used[species_num] + "_1998-1999 La Nina"

            emissions_table = calculate_species_for_year(directory, species_num, EF_species, year_to_use, start_month)
            # convert to $ value
            scar_table = emissions_table * scar_values[species_num] / GRAMS_PER_TON
            aq_table = emissions_table * aq_values[species_num] / GRAMS_PER_TON
            # convert to Tg CO
            final_emissions_table = emissions_table / 1E12
            tables = [final_emissions_table, scar_table, aq_table]

            for data_type in range(3):
                regional_tables[year - start_year][data_type] += tables[data_type]
                species_tables[year - start_year][data_type][0:7, species_num] = tables[data_type][0:7, 14]

            plot_and_write_table(tables, writers, plotter, identifier)
        print(species_used[species_num] + " done")
    # calculate total emissions by adding up the results from each species, for each year
    for year in range(20):
        year_description = str(start_year + year)
        if year + start_year == end_year + 1:
            year_description = "1997-1998 El Nino"
        if year + start_year == end_year + 2:
            year_description = "1998-1999 La Nina"
        plot_regions_table(regional_tables[year], plotter, year_description + " regional totals")
        plot_species_table(species_tables[year], plotter, year_description + " all species")
Example #31
def plot_conv_layers(params_path):
    Parameters.load(params_path)
    environment = Environment()
    agent = Agent(environment)
    Plotter.plot_conv_layers(agent)
Example #32
    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--type', dest='type', default='original')
        parser.add_argument('--percentage', dest='percentage', default='5')
        parser.add_argument('--latex', dest='latex', action='store_true')
        args = parser.parse_args()
        type = args.type
        percentage = args.percentage
        print_latex = args.latex

        stats_f1_list = []
        f1_list = []
        stats_em_list = []
        em_list = []
        names_list = []
        plotter = Plotter(type, percentage)

        if type == 'original':
            dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
            models_to_process = config.ORIGINAL_CONFIG['models_to_process']
        elif type == 'class_dev':  # preds on 5% of training (pre-evaluation), trained on the split training set
            dev_pattern_file = config.CLASS_DEV_CONFIG['dev_pattern_file']
            models_to_process = config.CLASS_DEV_CONFIG['models_to_process']
        elif type == 'dev_on_splitted':  # preds on original dev, trained on the split training set
            dev_pattern_file = config.DEV_ON_SPLITTED_CONFIG[
                'dev_pattern_file']
            models_to_process = config.DEV_ON_SPLITTED_CONFIG[
                'models_to_process']
        elif type == 'ensemble':
            # original dev to construct id_to_type_dict
            print('\n 1. Step: original dev to construct id_to_type_dict\n')
            dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
            id_to_type = self.get_id_to_type_dict(dev_pattern_file)
            for k, v in id_to_type.items():
                self.id_to_type_dict[k] = v

            # class_dev to obtain weights
            print('\n 2. Step: class_dev to obtain weights\n')
            dev_pattern_file = config.CLASS_DEV_CONFIG['dev_pattern_file']
            models_to_process = config.CLASS_DEV_CONFIG['models_to_process']
            self.stats.type_to_count_dict = self.count_question_types(
                dev_pattern_file, print_latex)
            self.stats.print_latex = print_latex

            for model in models_to_process:
                name = model[0]
                file = model[1]
                results = self.analyze_model(name, file, dev_pattern_file)
                stats_f1_list.append(results['f1'][0])
                f1_list.append(results['f1'][1])
                stats_em_list.append(results['em'][0])
                em_list.append(results['em'][1])
                names_list.append(name)

            # self.stats.summarize()
            plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1',
                             'class_dev')
            plotter.plot_bar(stats_em_list, em_list, names_list, 'EM',
                             'class_dev')

            weights = self.ensembler.count_weights(stats_f1_list, names_list,
                                                   'F1')
            weights_updated = self.ensembler.update_undefined_type_weight(
                weights, names_list, f1_list)

            # dev_on_splitted to get candidate answers
            print('\n 3. Step: dev_on_splitted to get candidate answers\n')
            models_to_process = config.ORIGINAL_CONFIG['models_to_process']
            candidate_predictions = self.get_candidate_predictions(
                models_to_process)

            # ensemble.predict to get ensemble answers -> save to file
            print(
                '\n 4. Step: ensemble.predict to get ensemble answers -> save to file\n'
            )
            ensemble_predictions = self.ensembler.predict(
                candidate_predictions, self.id_to_type_dict, weights_updated)
            with open(config.ENSEMBLE_FILE, 'w') as f:
                json.dump(ensemble_predictions, f)

            # evaluate ensemble predictions (vs. 100% of training results)
            # ??? vs. splitted or full training
            print(
                '\n 5. Step: evaluate ensemble predictions (vs. 100% training results)\n'
            )
            dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
            models_to_process = config.ORIGINAL_CONFIG['models_to_process']
            models_to_process.append(('Ensemble', config.ENSEMBLE_FILE))
            print(models_to_process)

            stats_f1_list = []
            f1_list = []
            stats_em_list = []
            em_list = []
            names_list = []
            for model in models_to_process:
                name = model[0]
                print('\nAnalysing {}...'.format(name))
                file = model[1]
                results = self.analyze_model(name, file, dev_pattern_file)
                stats_f1_list.append(results['f1'][0])
                f1_list.append(results['f1'][1])
                stats_em_list.append(results['em'][0])
                em_list.append(results['em'][1])
                names_list.append(name)

            # self.stats.summarize()

            plotter.type = 'ensemble'
            plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1', type)
            plotter.plot_bar(stats_em_list, em_list, names_list, 'EM', type)

        else:
            print(
                'type must be original, class_dev, dev_on_splitted or ensemble'
            )
            sys.exit(1)

        self.stats.type_to_count_dict = self.count_question_types(
            dev_pattern_file, print_latex)
        self.stats.print_latex = print_latex

        if type != 'ensemble':
            for model in models_to_process:
                name = model[0]
                print('\nAnalysing {}...'.format(name))
                file = model[1]
                results = self.analyze_model(name, file, dev_pattern_file)
                stats_f1_list.append(results['f1'][0])
                f1_list.append(results['f1'][1])
                stats_em_list.append(results['em'][0])
                em_list.append(results['em'][1])
                names_list.append(name)

            self.stats.summarize()

            plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1', type)
            plotter.plot_bar(stats_em_list, em_list, names_list, 'EM', type)
Example #33
from data_reader import Data
from datapipeline import Datapipeline
from plot import Plotter
from gbm_model import Model

BASE_URL = "http://*****:*****"  # credentials elided in the source


@app.route('/')
def index():
    return render_template('index.html', base_url=BASE_URL)


@app.route('/plot/get_poverty_breakdown', methods=['GET'])
def get_poverty_breakdown():
    bytes_obj = plotter.plot_poverty_breakdown()
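The excerpt is cut off here; a plausible way such a route would finish, assuming `bytes_obj` is an in-memory PNG buffer and `send_file` is imported from flask (both assumptions, not shown in the original file):

    # hypothetical completion: stream the PNG buffer back to the client
    return send_file(bytes_obj, mimetype='image/png')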
Example #34
from visualizer import visualizer
from plot import Plotter
from audio import AudioConnection
from config import Config

CONFIG = Config()

if __name__ == "__main__":
    p = Plotter(CONFIG)
    conn = AudioConnection(CONFIG)
    visualizer(p, conn, CONFIG)