Example #1
def data_vis_freq():
    global img
    # Fetch meal data for user 1 and render it with the project's Viz helper
    resp = requests.get(url + "meals/data/1", json=login_info)
    data1 = resp.json()["response"]
    myviz2 = Viz()
    myviz2.viz_user_freq(str(data1))
    # Viz writes viz.png to disk; show it in a new Tkinter window
    img = ImageTk.PhotoImage(Image.open("viz.png"))
    data = Toplevel()
    panel = Label(data, image=img)
    panel.image = img  # keep a reference so Tk doesn't garbage-collect the image
    panel.pack()
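Neither this nor the next example shows its imports or module-level state. A minimal sketch of the context both assume follows; the url value, the login_info payload, and the Viz import path are placeholders, not the originals.

# Hypothetical context for Examples #1 and #2; the real module defines these.
import requests
from tkinter import Tk, Toplevel, Label
from PIL import Image, ImageTk
from viz import Viz  # hypothetical import path for the project's Viz class

url = "http://localhost:5000/"                     # assumed API base URL
login_info = {"username": "user", "token": "..."}  # assumed auth payload
root = Tk()  # Toplevel() requires a running root window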
Example #2
def owner_data_money():
    global img
    # Same pattern as Example #1, for restaurant revenue data
    resp = requests.get(url + "restaurants/data/1", json=login_info)
    data1 = resp.json()["response"]
    print(data1)
    myviz = Viz()
    myviz.viz_rest_money(str(data1))
    img = ImageTk.PhotoImage(Image.open("viz.png"))
    data = Toplevel()
    panel = Label(data, image=img)
    panel.image = img  # keep a reference so Tk doesn't garbage-collect the image
    panel.pack()
Example #3
def plot(exps, n_epochs, fig_name, y_lim):
    # Thin wrapper that configures and draws a perplexity-vs-epoch figure
    viz = Viz(
        exps=exps,
        n_epochs=n_epochs,
        x_label='epoch',
        y_label='perplexity',
        x_lim=None,
        y_lim=y_lim,
        title='',
        fig_name=fig_name
    )
    viz.plot()
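A sketch of how this wrapper might be called; the structure of exps is an assumption about what this project's Viz constructor expects.

# Hypothetical call: two experiments with three epochs of perplexity each.
exps = [('baseline', [120.0, 95.3, 80.1]), ('tuned', [110.0, 88.2, 71.4])]
plot(exps, n_epochs=3, fig_name='perplexity.png', y_lim=(0, 150))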
Example #4
def __init__(self,
             optim=torch.optim.Adam,
             optim_args={},
             loss_func=torch.nn.MSELoss(),
             saveDir='../models/',
             vis=False):
    # Merge caller-supplied optimizer args over the class-level Adam defaults
    optim_args_merged = self.default_adam_args.copy()
    optim_args_merged.update(optim_args)
    self.optim_args = optim_args_merged
    self.optim = optim
    self.loss_func = loss_func
    self.saveDir = saveDir
    self.visdom = Viz() if vis else False
    self._reset_history()
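The copy()-then-update() merge is what keeps the shared, class-level defaults intact across instances; a self-contained sketch of the pattern:

# Merging into a fresh dict leaves the shared defaults untouched.
defaults = {'lr': 1e-4, 'weight_decay': 0.0}
merged = defaults.copy()
merged.update({'lr': 1e-3})
assert defaults['lr'] == 1e-4 and merged['lr'] == 1e-3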
Example #5
def json_response():
    if 'viz_id' in request.args:
        viz_id = request.args['viz_id']
        errors = Viz(viz_id).json_response()
        js = json.dumps(errors)
        resp = Response(js, status=200, mimetype='application/json')
        return resp
    else:
        return 'Error'
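For comparison, flask.jsonify builds the same application/json response without a manual json.dumps. A sketch assuming the same Viz API and an existing Flask app object; the route path is illustrative.

from flask import jsonify, request

@app.route('/json_response')  # hypothetical route
def json_response_alt():
    viz_id = request.args.get('viz_id')
    if viz_id is None:
        return 'Error', 400
    # jsonify serializes and sets the mimetype in one step
    return jsonify(Viz(viz_id).json_response())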
Example #6
def main():

    fh = FileHandler()  # s3_bucket='fmi-sasse-classification-dataset'
    viz = Viz()

    data = pd.read_csv(options.test_dataset_file)
    data = data.loc[data['weather_parameter'] == 'WindGust']

    X = data.loc[:, options.feature_params]
    y = data.loc[:, options.label].values.ravel()

    model = fh.load_model(options.save_path)
    evaluate(model, options, data=(X, y), fh=fh, viz=viz)
Example #7
def run_tc(rmgr, tgt, launcher, visualizer, tc, pwd):
    '''
    For the given set of tasks, do the following:

      - find a set of resources to run them (schedule)
      - prepare a shell script and execute it via `fork` (popen).
      - wait for it to complete.

    If a task cannot be scheduled, it is put on a wait list, and rescheduling is
    attempted when some other task finishes and frees resources.
    '''

    rm = RM.create(rmgr, tgt)

    # prepare node list, create ctasks for this tc
    nodes = rm.get_nodes(tc)
    tasks = create_tasks(tc, pwd)

    v = None  # visualizer
    lm = None  # launch method

    try:
        v = Viz.create(visualizer, nodes, rm.cpn, rm.gpn, tasks)
        v.header('test case: %s [ %s ]' % (tc['uid'], launcher))

        v.text(None)  # reset text part
        v.text('nodes  : %s' % tc['nodes'])
        v.text('procs  : %s' % tc['procs'])
        v.text('threads: %s' % tc['threads'])
        v.text('gpus   : %s' % tc['gpus'])
        v.text('tasks  : %s' % tc['tasks'])
        v.update()

        lm = LM.create(launcher, nodes)

        waiting = tasks
        scheduled = list()
        running = list()
        done = list()
        first = True  # first iteration

        i = 0
        while True:

            # is there *anything* to do?
            if not waiting and not scheduled and not running:
                break

            v.update()

            # are there any tasks to collect / resources to be freed?
            running, collected = wait_tasks(lm, pwd, nodes, running)
            done += collected

            v.update()

            # are there any tasks waiting, and do we have resources?
            if first or collected:
                first = False
                scheduled, waiting = schedule_tasks(tc, rm, nodes, waiting)
                v.update()

            # execute scheduled tasks
            running += execute_tasks(lm, pwd, scheduled)
            scheduled = list()

            ## # slow down, matey!
            ## time.sleep(0.1)
            i += 1

        v.update()

    finally:
        if v: v.close()
        if lm: lm.close()

    # summary:
    summary = '%s %s ' % (tc['uid'], launcher)

    if not nodes:
        summary += '0 0 0 0 0 0 '

    else:
        c_total = 0
        c_busy = 0
        c_free = 0

        g_total = 0
        g_busy = 0
        g_free = 0

        for node in nodes:

            c_total += len(node[1])
            c_busy += node[1].count(BUSY)
            c_free += node[1].count(FREE)

            g_total += len(node[2])
            g_busy += node[2].count(BUSY)
            g_free += node[2].count(FREE)

        summary += '%d %d %d %d %d %d %d ' \
                % (len(nodes), c_total, c_busy, c_free, g_total, g_busy, g_free)

    if not tasks:
        summary += '0 0 0 0 0 0 0 0'

    else:
        t_total = len(tasks)
        t_new = len([1 for t in tasks if t['state'] == NEW])
        t_waiting = len([1 for t in tasks if t['state'] == WAITING])
        t_scheduled = len([1 for t in tasks if t['state'] == SCHEDULED])
        t_running = len([1 for t in tasks if t['state'] == RUNNING])
        t_done = len([1 for t in tasks if t['state'] == DONE])
        t_failed = len([1 for t in tasks if t['state'] == FAILED])
        t_misplaced = len([1 for t in tasks if t['state'] == MISPLACED])

        summary += '%d %d %d %d %d %d %d %d' \
                % (t_total,   t_new,  t_waiting, t_scheduled,
                   t_running, t_done, t_failed,  t_misplaced)

    return summary
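The closing tally walks the task list once per state; collections.Counter produces the same eight numbers in a single pass. A sketch reusing tasks, summary, and the state constants from the snippet:

from collections import Counter

states = Counter(t['state'] for t in tasks)  # missing states count as 0
summary += '%d %d %d %d %d %d %d %d' % (
    len(tasks), states[NEW], states[WAITING], states[SCHEDULED],
    states[RUNNING], states[DONE], states[FAILED], states[MISPLACED])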
Example #8
def check_viz():
    if 'viz_id' in request.args:
        viz_id = request.args['viz_id']
        return viz_qa(viz_id)
    else:
        background = requests.get('http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US')
        bkgd_json = background.json()
        img_url = 'http://www.bing.com' + bkgd_json['images'][0]['url']
        html_string = '''
            <!doctype html>
            <head>
                <title>Q/A</title>
                <link href="//netdna.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" rel="stylesheet" media="screen">
                <style>
                    body {
                    background: url(''' + img_url + ''') no-repeat center center fixed;
                    -webkit-background-size: cover;
                    -moz-background-size: cover;
                    -o-background-size: cover;
                    background-size: cover;
                    }
                    .container, .container-box {
                    text-align: center;
                    margin: auto;
                    background: white;
                    padding: 1px;
                    padding-bottom: 20px;
                    margin-top: 25px;
                    display: block;
                    }
                    .container-box {
                    width: 230px;
                    }
                    form {
                        display: inline-block;
                        left-margin: auto;
                        right-margin: auto;
                    }
                    table {
                        margin: auto;
                        margin-top: 30px;
                        color: #333;
                        font-family: monospace;
                        width: 640px;
                        border-collapse:
                        collapse; border-spacing: 0;
                    }

                    td, th {
                        border: 1px solid #CCC;
                        height: 30px;
                    }

                    th {
                        background: #F3F3F3;
                        font-weight: bold;
                        text-align: left;
                        padding: 5px;
                    }

                    td {
                        background: #FAFAFA;
                        padding: 5px;
                    }
                </style>
            </head>
            <div class="container-box">
                <h1>Viz URL</h1>
                <form role="form" method='POST' action='/qa'>
                    <div class="form-group">
                      <input type="text" name="url" class="form-control" id="url-box" placeholder="Enter URL..." style="max-width: 300px;" autofocus required>
                    </div>
                    <button type="submit" class="btn btn-default">Submit</button>
                </form>
            </div>
                '''
        if request.method == "POST":
            try:
                viz_url = request.form['url']
                json_pat = re.compile(r'(.*\.com/w/|.*/preview/)(\w*)')
                matches = json_pat.findall(viz_url)
                viz_id = matches[0][1]
            except:
                return "There was an error!"
            try:
                viz = Viz(viz_id)
                errors = viz.json_response()
                html_string += '''
                            <table>
                            <tr>
                                <th>Error Type</th>
                                <th>Error Message</th>
                            </tr>
                            '''
                # Parse error json and create 1 sentence per error, storing each sentence in error_list
                for item in errors:
                    for key, value in item.iteritems():
                        error_type = key
                        if error_type == 'Timeline errors':
                            for slide, messages in value.iteritems():
                                for message in messages:
                                    html_string = html_string + "<tr><td>" + slide + "</td><td>" + message.encode("utf-8") + "</td></tr>"
                        else:
                            for error_val in value:
                                try:
                                    html_string = html_string + '<tr><td>' + error_type + '</td><td>' + error_val.encode('utf-8') + '</td></tr>'

                                except:
                                    for sub_error in error_val:
                                        html_string = html_string + '<tr><td>' + error_type + '</td><td>' + sub_error.encode('utf-8') + '</td></tr>'
                html_string = html_string + '</table><br><div class="ftb-widget" data-widget-id="' + viz_id + '"></div><script async src="https://s.graphiq.com/rx/widgets.js"></script>'
            except:
                return 'There was an error with the API! Let Corie know.'
        return html_string
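The error strings above are concatenated into the HTML table unescaped. This snippet is Python 2 (note iteritems), where cgi.escape handled this; in Python 3 the stdlib html.escape does. A sketch of an escaped row builder:

import html

def error_row(error_type, message):
    # Escape both cells so markup inside an error message cannot break the table
    return '<tr><td>%s</td><td>%s</td></tr>' % (
        html.escape(error_type), html.escape(message))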
Example #9
def initialize_qapi():
    background = requests.get('http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US')
    bkgd_json = background.json()
    img_url = 'http://www.bing.com' + bkgd_json['images'][0]['url']
    html_string = '''
            <!doctype html>
            <head>
                <title>Q/A</title>
                <link href="//netdna.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" rel="stylesheet" media="screen">
                <style>
                body {
                background: url(''' + img_url + ''') no-repeat center center fixed;
                -webkit-background-size: cover;
                -moz-background-size: cover;
                -o-background-size: cover;
                background-size: cover;
                }
                .container, .container-box {
                text-align: center;
                margin: auto;
                background: white;
                padding: 1px;
                padding-bottom: 20px;
                margin-top: 25px;
                display: block;
                }
                .container {
                width: 450px;
                }
                .container-box {
                width: 230px;
                }
                form {
                    display: inline-block;
                    left-margin: auto;
                    right-margin: auto;
                }
                </style>
                </head>
                <div class="container-box">
                    <h1>Viz URL</h1>
                    <form role="form" method='POST' action='/qapi'>
                        <div class="form-group">
                          <input type="text" name="url" class="form-control" id="url-box" placeholder="Enter URL..." style="max-width: 300px;" autofocus required>
                        </div>
                        <button type="submit" class="btn btn-default">Submit</button>
                    </form>
                </div>
                '''
    if request.method == "POST":
        try:
            viz_url = request.form['url']
            json_pat = re.compile(r'(.*\.com/w/|.*/preview/)(\w*)')
            matches = json_pat.findall(viz_url)
            viz_id = matches[0][1]
        except:
            return 'Error parsing viz ID from URL. Make sure to submit a viz preview link!'
        try:
            viz = Viz(viz_id)
            errors = viz.json_response()
            error_list = []
            # Parse error json and create 1 sentence per error, storing each sentence in error_list
            for item in errors:
                for key, value in item.iteritems():
                    error_type = key
                    if error_type == 'Timeline errors':
                        for slide, messages in value.iteritems():
                            for message in messages:
                                error = slide + ': ' + message.encode('utf-8')
                                error_list.append(error)
                    else:
                        for error_val in value:
                            try:
                                error = error_type + ': ' + error_val.encode('utf-8')
                                error_list.append(error)
                            except:
                                for sub_error in error_val:
                                    error = error_type + ': ' + sub_error.encode('utf-8')
                                    error_list.append(error)
            # Create new QA task
            task_data = '"{\\"data\\": {  \\"notes\\" : \\"' + viz_url + '\\" ,  \\"projects\\" : \\"244458016124389\\" , \\"name\\" : \\"' + viz.title + '\\" } }"'
            task_shell_command = '/usr/bin/curl -s --request POST -H "Authorization: Bearer 0/a3244bb3177f3e0c7242c459c4324863" -H "Content-Type: application/json" https://app.asana.com/api/1.0/tasks -d ' + task_data
            new_task = subprocess.check_output(task_shell_command, shell=True)
            new_task_id = json.loads(new_task)['data']['id']
            task_url = 'https://app.asana.com/0/244458016124389/' + str(new_task_id)
            subtask_url = 'https://app.asana.com/api/1.0/tasks/' + str(new_task_id) + '/subtasks'
            html_string = html_string + '<div class="container">' + task_url + '<br><a href="' + task_url + '" target="_blank">Go!</a></div>'

            # Post each error to Asana
            for error_text in sorted(error_list, reverse=True):
                subtask_data = '"{\\"data\\": {  \\"name\\" : \\"' + error_text + '\\"  } }"'
                subtask_shell_command = '/usr/bin/curl -s --request POST -H "Authorization: Bearer 0/a3244bb3177f3e0c7242c459c4324863" -H "Content-Type: application/json" ' + subtask_url + ' -d ' + subtask_data
                subprocess.check_output(subtask_shell_command, shell=True)

            # QA 1 and 2 subtasks
            qa2_shell = '/usr/bin/curl -s --request POST -H "Authorization: Bearer 0/a3244bb3177f3e0c7242c459c4324863" -H "Content-Type: application/json" ' + subtask_url + ' -d "{\\"data\\": {  \\"name\\" : \\"QA 2\\"  } }"'
            qa1_shell = '/usr/bin/curl -s --request POST -H "Authorization: Bearer 0/a3244bb3177f3e0c7242c459c4324863" -H "Content-Type: application/json" ' + subtask_url + ' -d "{\\"data\\": {  \\"name\\" : \\"QA 1\\"  } }"'
            subprocess.check_output(qa2_shell, shell=True)
            subprocess.check_output(qa1_shell, shell=True)
        except:
            return 'There was an error with the API- let Corie know!'
    return html_string
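Shelling out to curl forces the hand-escaped JSON strings above; the module already uses requests, which can make the same Asana call directly. A sketch reusing viz_url and viz.title from the snippet, with a placeholder token (real tokens belong outside source code):

ASANA_TOKEN = '<personal-access-token>'  # placeholder
headers = {'Authorization': 'Bearer ' + ASANA_TOKEN}

resp = requests.post('https://app.asana.com/api/1.0/tasks',
                     headers=headers,
                     json={'data': {'notes': viz_url,
                                    'projects': '244458016124389',
                                    'name': viz.title}})
new_task_id = resp.json()['data']['id']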
Example #10
    # Parse command line arguments
    args = sys.argv[1:]
    VARIANCE_CUTOFF = float(args[0])
    TIME_MIN = int(args[1])
    TIME_MAX = int(args[2])
    SOURCE_FILE = args[3]
    PLOT_FILE = args[4]
    NUM_CHANNELS = int(args[5])

    # Build and run anomaly detector
    anomaly_file = "spectrum4.csv"
    detector = AstroHTM(SOURCE_FILE + '.fits',
                        VARIANCE_CUTOFF,
                        headers,
                        model_params.MODEL_PARAMS,
                        anomaly_file,
                        select_cols=True)
    detector.runAstroAnomaly()

    # Write original spectra to csv
    spectrum_file = SOURCE_FILE + '.csv'
    detector.data.write_data_to_csv(spectrum_file)

    # Visualize original spectra with anomalies
    viz = Viz(spectrum_file, TIME_MIN, TIME_MAX, cutoffs=detector.data.cutoffs)
    viz.choose_spectra(0, NUM_CHANNELS)
    viz.add_anomalies(anomaly_file)
    # print(viz.df)
    viz.plot(PLOT_FILE)
    print("Plot was saved to", PLOT_FILE)
Example #11
class Solver(object):
    default_adam_args = {
        "lr": 1e-4,
        "betas": (0.9, 0.999),
        "eps": 1e-8,
        "weight_decay": 0.0
    }

    def __init__(self,
                 optim=torch.optim.Adam,
                 optim_args={},
                 loss_func=torch.nn.MSELoss(),
                 saveDir='../models/',
                 vis=False):
        optim_args_merged = self.default_adam_args.copy()
        optim_args_merged.update(optim_args)
        self.optim_args = optim_args_merged
        self.optim = optim
        self.loss_func = loss_func
        self.saveDir = saveDir
        self.visdom = Viz() if vis else False
        self._reset_history()

    def train(self,
              model,
              train_loader,
              val_loader,
              num_epochs=10,
              log_nth=0,
              checkpoint={}):
        """
    Train a given model with the provided data.

    Inputs:
    - model: model object initialized from a torch.nn.Module
    - train_loader: train data in torch.utils.data.DataLoader
    - val_loader: val data in torch.utils.data.DataLoader
    - num_epochs: total number of training epochs
    - log_nth: log training accuracy and loss every nth iteration
    - checkpoint: object used to resume training from a checkpoint
    """
        optim = self.optim(
            filter(lambda p: p.requires_grad, model.parameters()),
            **self.optim_args)
        scheduler = False  # torch.optim.lr_scheduler.ReduceLROnPlateau(optim)

        iter_per_epoch = len(train_loader)
        start_epoch = 0
        best_val_acc = 0.0
        is_best = False

        if len(checkpoint) > 0:
            start_epoch = checkpoint['epoch']
            best_val_acc = checkpoint['best_val_acc']
            optim.load_state_dict(checkpoint['optimizer'])
            self._load_history()
            print("=> Loaded checkpoint (epoch {:d})".format(
                checkpoint['epoch']))

        if self.visdom:
            iter_plot = self.visdom.create_plot('Epoch', 'Loss', 'Train Loss',
                                                {'ytype': 'log'})

        ########################################################################
        # The log should look something like:                                 #
        #   ...                                                                #
        #   [Iteration 700/4800] TRAIN loss: 1.452                             #
        #   [Iteration 800/4800] TRAIN loss: 1.409                             #
        #   [Iteration 900/4800] TRAIN loss: 1.374                             #
        #   [Epoch 1/5] TRAIN   loss: 0.560/1.374                              #
        #   [Epoch 1/5] VAL acc/loss: 53.90%/1.310                             #
        #   ...                                                                #
        ########################################################################

        for epoch in range(start_epoch, num_epochs):
            # TRAINING
            model.train()
            train_loss = 0

            for i, (inputs, targets) in enumerate(train_loader, 1):
                inputs, targets = Variable(inputs), Variable(targets)
                if model.is_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()

                optim.zero_grad()
                outputs = model(inputs)
                #outputs = utils.heatmaps_to_3d_joints(outputs, inputs.size(1))
                loss = self.loss_func(outputs, targets)
                loss.backward()
                optim.step()

                self.train_loss_history.append(loss.data.cpu().numpy())
                if log_nth and i % log_nth == 0:
                    last_log_nth_losses = self.train_loss_history[-log_nth:]
                    train_loss = np.mean(last_log_nth_losses)
                    print('[Iteration {:d}/{:d}] TRAIN loss: {:.2f}'.format(
                        i + epoch * iter_per_epoch,
                        iter_per_epoch * num_epochs, train_loss))

                    if self.visdom:
                        self.visdom.update_plot(x=epoch + i / iter_per_epoch,
                                                y=train_loss,
                                                window=iter_plot,
                                                type_upd="append")

            if log_nth:
                print('[Epoch {:d}/{:d}] TRAIN   loss: {:.2f}'.format(
                    epoch + 1, num_epochs, train_loss))

            # VALIDATION
            if len(val_loader):
                val_acc, val_loss = self.test(model, val_loader)
                self.val_acc_history.append(val_acc)
                self.val_loss_history.append(val_loss)

                # Set best model to the one with highest validation set accuracy
                is_best = val_acc >= best_val_acc
                best_val_acc = max(val_acc, best_val_acc)

                # Reduce LR progressively
                if scheduler:
                    scheduler.step(val_acc)

                if log_nth:
                    print(
                        '[Epoch {:d}/{:d}] VAL acc/loss: {:.2%}/{:.2f}'.format(
                            epoch + 1, num_epochs, val_acc, val_loss))

            self._save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_val_acc': best_val_acc,
                    'optimizer': optim.state_dict(),
                }, is_best)

    def test(self, model, test_loader, tolerance=0.05):
        """
    Test a given model with the provided data.

    Inputs:
    - model: model object initialized from a torch.nn.Module
    - test_loader: test data in torch.utils.data.DataLoader
    - tolerance: acceptable percentage error used to compute test accuracy
    """
        test_loss = AverageMeter()
        acc_per_axis = AverageMeter()
        model.eval()

        for inputs, targets in test_loader:
            inputs, targets = Variable(inputs), Variable(targets)
            if model.is_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            outputs = model.forward(inputs)
            #outputs = utils.heatmaps_to_3d_joints(outputs)
            if outputs.size() != targets.size():
                temp = torch.zeros(targets.size())
                joint_idx = [
                    8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4,
                    7
                ]  # idx+1
                for idx, val in enumerate(joint_idx):
                    temp[:, idx * 3] = outputs[:, (val - 1) * 3]
                    temp[:, idx * 3 + 1] = outputs[:, (val - 1) * 3 + 1]
                    temp[:, idx * 3 + 2] = outputs[:, (val - 1) * 3 + 2]
                outputs = temp

            loss = self.loss_func(outputs, targets)
            test_loss.update(loss.data.cpu().numpy())

            # Same values as the loss terms when loss_func is MSELoss
            diffs = (outputs - targets).pow(2).data.cpu().numpy()
            max_diff = np.square(inputs.size(-1) * tolerance)
            pred_array = sum(diff <= max_diff for diff in diffs).reshape(-1, 3)
            acc_per_axis.update(pred_array.mean(axis=0) / inputs.size(0))

        # print('\n=> Test accuracy: x={:.2%} y={:.2%} z={:.2%}'.format(
        #     acc_per_axis.avg[0], acc_per_axis.avg[1], acc_per_axis.avg[2]))
        test_acc = acc_per_axis.avg.mean()
        test_loss = float(test_loss.avg)
        return test_acc, test_loss

    def _save_checkpoint(self, state, is_best, fname='checkpoint.pth'):
        """
    Save current state of training and trigger method to save training history.
    """
        print('Saving at checkpoint...')
        path = os.path.join(self.saveDir, fname)
        torch.save(state, path)
        self._save_history()
        if is_best:
            shutil.copyfile(path, os.path.join(self.saveDir, 'model_best.pth'))

    def _reset_history(self):
        """
    Resets train and val histories.
    """
        self.train_loss_history = []
        self.val_acc_history = []
        self.val_loss_history = []

    def _save_history(self, fname="train_history.npz"):
        """
    Save training history. Conventionally the fname should end with "*.npz".
    """
        np.savez(os.path.join(self.saveDir, fname),
                 train_loss_history=self.train_loss_history,
                 val_loss_history=self.val_loss_history,
                 val_acc_history=self.val_acc_history)

    def _load_history(self, fname="train_history.npz"):
        """
    Load training history. Conventionally the fname should end with "*.npz".
    """
        npzfile = np.load(os.path.join(self.saveDir, fname))
        self.train_loss_history = npzfile['train_loss_history'].tolist()
        self.val_acc_history = npzfile['val_acc_history'].tolist()
        self.val_loss_history = npzfile['val_loss_history'].tolist()
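A minimal usage sketch for this Solver with a toy model and synthetic data. Everything here is fabricated for illustration, including the is_cuda attribute, which train() and test() expect the model to expose:

import torch
from torch.utils.data import DataLoader, TensorDataset

class ToyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 3)
        self.is_cuda = False  # attribute checked by Solver.train/test

    def forward(self, x):
        return self.fc(x)

# Synthetic regression data; targets come in groups of 3, as test() assumes
X, y = torch.randn(64, 4), torch.randn(64, 3)
loader = DataLoader(TensorDataset(X, y), batch_size=16)

solver = Solver(optim_args={'lr': 1e-3}, saveDir='.')
solver.train(ToyNet(), loader, loader, num_epochs=2, log_nth=1)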
Example #12
def main():

    if hasattr(options, 'dask'):
        client = Client('{}:8786'.format(options.dask))
    else:
        client = Client()

    logging.info(client)

    if hasattr(options, 's3_bucket'):
        fh = FileHandler(s3_bucket=options.s3_bucket)
        viz = Viz(io=fh)
    else:
        fh = FileHandler()
        viz = Viz()

    datasets = fh.read_data([options.train_data, options.test_data], options)

    X_train = datasets[0][0]
    y_train = datasets[0][1]
    X_test = datasets[1][0]
    y_test = datasets[1][1]

    # Train
    if options.model == 'svct':
        model = SVCT(verbose=True)
    elif options.model == 'gp':
        kernel = PairwiseKernel(metric='laplacian') * DotProduct()
        model = GaussianProcessClassifier(kernel=kernel, n_jobs=-1)
    elif options.model == 'rfc':
        # param_grid_rfc = {
        # "n_estimators": [10, 100, 150, 200, 250, 500],
        # "max_depth": [20, 50, 100, None],
        # "max_features": ["auto", "log2", None],
        # "min_samples_split": [2,5,10],
        # "min_samples_leaf": [1, 2, 4],
        # "bootstrap": [False]
        # }

        # Fetched using 5-fold cv with random search from params above
        if "national" in options.dataset:
            params = {'n_estimators': 500, 'min_samples_split': 2, 'min_samples_leaf': 1, 'max_features': 'sqrt', 'max_depth': None, 'bootstrap': False, 'n_jobs': -1}
        else:
            params = {'n_estimators': 250, 'min_samples_split': 2, 'min_samples_leaf': 10, 'max_features': None, 'max_depth': 20, 'bootstrap': False, 'n_jobs': -1}

        model = RandomForestClassifier(**params)

    elif options.model == 'gnb':
        model = GaussianNB()
    else:
        raise Exception('Model not defined')

    logging.info('Training...')
    if options.model == 'gnb':
        priors = []
        for i in np.arange(0,1,.05):
            for j in np.arange(0, 1-i, .05):
                k = 1 - i - j
                priors.append([i, j, k])

        param_grid_gnb = {
            'priors': priors + [None],
            'var_smoothing': expon(scale=.01)
        }
        model, cv_results = cv(model, param_grid_gnb, X_train, y_train, n_iter=500)
    else:
        with joblib.parallel_backend('dask'):
            model.fit(X_train, y_train)

    # Evaluate
    y_pred_train = model.predict(X_train)
    logging.info('Training report:\n{}'.format(classification_report(y_train, y_pred_train)))

    y_pred = model.predict(X_test)
    logging.info('Validation report:\n{}'.format(classification_report(y_test, y_pred)))

    fname = '{}/confusion_matrix_testset.png'.format(options.output_path)
    viz.plot_confusion_matrix(y_test, y_pred, np.arange(3), filename=fname)

    fname = '{}/confusion_matrix_testset_normalised.png'.format(options.output_path)
    viz.plot_confusion_matrix(y_test, y_pred, np.arange(3), True, filename=fname)

    if options.model == 'rfc':
        # Sort feature importances in descending order and rearrange feature names accordingly
        indices = np.argsort(model.feature_importances_)[::-1]
        names = [options.feature_params[i] for i in indices]
        importances = model.feature_importances_[indices]

        fname = '{}/feature_importances.png'.format(options.output_path)

        viz.rfc_feature_importance(importances, fname, names)

    if options.model == 'svct':
        fh.save_svct(model, options.save_path)
    else:
        fh.save_model(model, options.save_path)
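The cv() helper used for the GaussianNB search is not shown; given the scipy distribution in param_grid_gnb, it plausibly wraps sklearn's RandomizedSearchCV. A hypothetical stand-in with the same call signature:

from sklearn.model_selection import RandomizedSearchCV

def cv(model, param_distributions, X, y, n_iter=500):
    # Hypothetical stand-in for the project's cv() helper
    search = RandomizedSearchCV(model, param_distributions,
                                n_iter=n_iter, cv=5, n_jobs=-1)
    search.fit(X, y)
    return search.best_estimator_, search.cv_results_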
Example #13
    0.75600314, 0.40023685, -0.04447079, 0.57812, 0.57812, 0.40023685,
    1.3785939, 0.13341236, -0.5114138, 0.26682448, -1.0228276, 0.31129527,
    -1.1117692, 0.35576606, -1.2007108, 0.93388605, -1.0228276, 1.1117692,
    -0.31129527, 1.0228276, 0.22235394, -0.40023696, -1.0228276, -0.8449447,
    -0.40023685, -1.1117692, 0.13341236
])
fixed_x = torch.from_numpy(fixed_data).float().to(device)

optimizerG = torch.optim.Adam(netG.parameters(), lr=args.lr)
optimizerD = torch.optim.Adam(netD.parameters(), lr=args.lr)

iter_per_epoch = len(data_loader)
start_epoch = torch.load(args.model)['epoch'] if args.model else 0

if args.visdom:
    visdom = Viz()
    viz_D = visdom.create_plot('Epoch', 'Loss', 'Loss Discriminator')
    viz_G = visdom.create_plot('Epoch', 'Loss', 'Loss Generator')
    viz_WD = visdom.create_plot('Epoch', 'WD', 'Wasserstein Distance')
    viz_GP = visdom.create_plot('Epoch', 'GP', 'Gradient Penalty')
    viz_img = np.transpose(utils.create_img(fixed_data), (2, 0, 1))
    viz_img = visdom.create_img(viz_img, title='2D Sample')

print('Start')
for epoch in range(start_epoch, args.epochs):
    netD.train()
    netG.train()
    loss_G = 0

    for i, data in enumerate(data_loader, 1):
        batch_size = data.size(0)
Example #14
    def save_checkpoint(self, training_agent, sim_number):

        if self.train_saving is not None and self.train_saving(sim_number):
            save_path = 'Models/'
            if not os.path.exists(save_path):
                os.makedirs(save_path)

            with open(save_path + 'metrics.pkl', "wb") as f:
                pickle.dump(self.metrics, f)
            # Save model
            training_agent.brain.save_model(save_path, str(sim_number))


if __name__ == '__main__':

    viz = Viz(600, save_dir='gifs/')

    def viz_execution(sim_number):
        return sim_number % 250 == 0 or sim_number == 1

    sim = Sim(allies=5,
              opponents=5,
              world_size=(10, 10),
              n_games=400000,
              train_batch_size=32,
              replay_mem_limit=200000,
              viz=viz,
              viz_execution=viz_execution,
              train_saving=viz_execution)
    sim.run()