Example #1
import os

import wandb


def test_html_str(mocked_run):  # `mocked_run` is a pytest fixture that stubs a wandb run
    html = wandb.Html("<html><body><h1>Hello</h1></body></html>")
    html.bind_to_run(mocked_run, "rad", "summary")
    wandb.Html.seq_to_json([html], mocked_run, "rad", "summary")
    assert os.path.exists(html._path)
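
This test exercises wandb's internal serialization path (bind_to_run / seq_to_json). For everyday use, wandb.Html is simply constructed from an HTML string (or an open file handle) and logged to a run. A minimal sketch, assuming wandb is installed and you are logged in; the project name is a placeholder:

import wandb

run = wandb.init(project="html-demo")  # placeholder project name
run.log({"report": wandb.Html("<html><body><h1>Hello</h1></body></html>")})
run.finish()
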
Example #2
def train(output_trajectory_train,
          output_trajectory_train_mask,
          output_trajectory_train_lengths,
          output_trajectory_train_startpos,
          output_trajectory_train_xyz,
          input_trajectory_train,
          input_trajectory_train_mask,
          input_trajectory_train_lengths,
          input_trajectory_train_startpos,
          model,
          output_trajectory_val,
          output_trajectory_val_mask,
          output_trajectory_val_lengths,
          output_trajectory_val_startpos,
          output_trajectory_val_xyz,
          input_trajectory_val,
          input_trajectory_val_mask,
          input_trajectory_val_lengths,
          input_trajectory_val_startpos,
          hidden,
          cell_state,
          projection_matrix,
          camera_to_world_matrix,
          optimizer,
          visualize_trajectory_flag=True,
          min_val_loss=2e10,
          model_checkpoint_path='./model/',
          visualization_path='./visualize_html/'):
    # Train the RNN/LSTM model, iterating over every example each epoch.
    n_epochs = 500
    # Re-initialize the hidden/cell state for the first RNN cell at each epoch.
    for epoch in range(1, n_epochs + 1):
        hidden = model.initHidden(batch_size=args.batch_size)
        cell_state = model.initCellState(batch_size=args.batch_size)
        # Training mode
        model.train()
        optimizer.zero_grad()  # Clear existing gradients from previous epoch
        # Forward pass on the training trajectories
        output_train, (_, _) = model(input_trajectory_train,
                                     hidden,
                                     cell_state,
                                     lengths=input_trajectory_train_lengths)
        # The model outputs the depth displacement given the (u, v) displacement input.
        # Apply cumulative summation to the output using the cumsum_trajectory function.
        output_train, input_trajectory_train_temp = cumsum_trajectory(
            output=output_train,
            trajectory=input_trajectory_train,
            trajectory_startpos=input_trajectory_train_startpos)
        # Project the (u, v, depth) to world space
        output_train = pt.stack([
            projectToWorldSpace(screen_space=input_trajectory_train_temp[i],
                                depth=output_train[i],
                                projection_matrix=projection_matrix,
                                camera_to_world_matrix=camera_to_world_matrix)
            for i in range(output_train.shape[0])
        ])
        # Evaluation mode
        model.eval()
        # Forward pass on the validation trajectories
        output_val, (_, _) = model(input_trajectory_val,
                                   hidden,
                                   cell_state,
                                   lengths=input_trajectory_val_lengths)
        # The model outputs the depth displacement given the (u, v) displacement input.
        # Apply cumulative summation to the output using the cumsum_trajectory function.
        output_val, input_trajectory_val_temp = cumsum_trajectory(
            output=output_val,
            trajectory=input_trajectory_val,
            trajectory_startpos=input_trajectory_val_startpos)
        # Project the (u, v, depth) to world space
        output_val = pt.stack([
            projectToWorldSpace(screen_space=input_trajectory_val_temp[i],
                                depth=output_val[i],
                                projection_matrix=projection_matrix,
                                camera_to_world_matrix=camera_to_world_matrix)
            for i in range(output_val.shape[0])
        ])
        # Detach the hidden/cell state so it can seed the next epoch without
        # backpropagating through previous epochs.
        cell_state = cell_state.detach()
        hidden = hidden.detach()

        # Compute the MSE loss on the projected world-space trajectories
        train_loss = MSELoss(output=output_train,
                             trajectory_gt=output_trajectory_train_xyz,
                             mask=output_trajectory_train_mask,
                             lengths=output_trajectory_train_lengths)
        val_loss = MSELoss(output=output_val,
                           trajectory_gt=output_trajectory_val_xyz,
                           mask=output_trajectory_val_mask,
                           lengths=output_trajectory_val_lengths)

        train_loss.backward()  # Backpropagate and compute gradients
        optimizer.step()  # Update the weights according to the gradients
        if epoch % 10 == 0:
            print('Epoch : {}/{}.........'.format(epoch, n_epochs), end='')
            print('Train Loss : {:.3f}'.format(train_loss.item()), end=', ')
            print('Val Loss : {:.3f}'.format(val_loss.item()))
            wandb.log({
                'Train Loss': train_loss.item(),
                'Validation Loss': val_loss.item()
            })
            if min_val_loss > val_loss:
                # Save model checkpoint
                print('[#]Saving a model checkpoint')
                min_val_loss = val_loss
                # Save to directory
                pt.save(model.state_dict(), model_checkpoint_path)
                # Save to wandb
                pt.save(model.state_dict(),
                        os.path.join(wandb.run.dir, 'model.pt'))

        if epoch % 250 == 0:
            if visualize_trajectory_flag:
                # Visualize by make a subplots of trajectory
                n_vis = 5
                fig = make_subplots(rows=n_vis,
                                    cols=2,
                                    specs=[[{
                                        'type': 'scatter3d'
                                    }, {
                                        'type': 'scatter3d'
                                    }]] * n_vis,
                                    horizontal_spacing=0.05,
                                    vertical_spacing=0.01)
                # visualize_trajectory appends the start position and applies cumulative
                # summation to turn the displacements back into (x, y, z) coordinates.
                # The mask can be used directly because it was built from the full
                # trajectory (the start position was not removed).
                visualize_trajectory(
                    output=pt.mul(output_train, output_trajectory_train_mask),
                    trajectory_gt=output_trajectory_train_xyz,
                    trajectory_startpos=output_trajectory_train_startpos,
                    lengths=input_trajectory_train_lengths,
                    mask=output_trajectory_train_mask,
                    fig=fig,
                    flag='Train',
                    n_vis=n_vis)
                visualize_trajectory(
                    output=pt.mul(output_val, output_trajectory_val_mask),
                    trajectory_gt=output_trajectory_val_xyz,
                    trajectory_startpos=output_trajectory_val_startpos,
                    lengths=input_trajectory_val_lengths,
                    mask=output_trajectory_val_mask,
                    fig=fig,
                    flag='Validation',
                    n_vis=n_vis)
                # Adjust the layout/axis
                fig.update_layout(height=1920, width=1500, autosize=True)
                plotly.offline.plot(
                    fig,
                    filename='./{}/trajectory_visualization_depth_auto_scaled.html'.format(visualization_path),
                    auto_open=False)
                wandb.log({
                    "AUTO SCALED : Trajectory Visualization(Col1=Train, Col2=Val)":
                    wandb.Html(open('./{}/trajectory_visualization_depth_auto_scaled.html'.format(visualization_path)))
                })
                fig = visualize_layout_update(fig=fig, n_vis=n_vis)
                plotly.offline.plot(
                    fig,
                    filename='./{}/trajectory_visualization_depth_pitch_scaled.html'.format(visualization_path),
                    auto_open=False)
                wandb.log({
                    "PITCH SCALED : Trajectory Visualization(Col1=Train, Col2=Val)":
                    wandb.Html(open('./{}/trajectory_visualization_depth_pitch_scaled.html'.format(visualization_path)))
                })
    return min_val_loss, hidden, cell_state
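
The core wandb.Html usage in the training loop above is: write a Plotly figure to an HTML file, then log the open file handle so it renders as an interactive panel. A minimal standalone sketch of that pattern, assuming plotly and wandb are installed and you are logged in; the figure, directory, and key names are placeholders:

import os
import plotly
import plotly.graph_objects as go
import wandb

run = wandb.init(project="trajectory-demo")  # placeholder project name
fig = go.Figure(data=go.Scatter3d(x=[0, 1, 2], y=[0, 1, 4], z=[0, 2, 1], mode='lines'))
os.makedirs('./visualize_html', exist_ok=True)
html_path = './visualize_html/trajectory_visualization.html'
plotly.offline.plot(fig, filename=html_path, auto_open=False)
# Log the saved HTML file as an interactive panel in the W&B UI
run.log({"Trajectory Visualization": wandb.Html(open(html_path))})
run.finish()
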
Example #3
File: verify.py  Project: Alex1992On/client
def check_run(api: Api) -> None:
    print(
        "Checking logged metrics, saving and downloading a file".ljust(72, "."), end=""
    )
    failed_test_strings = []

    # set up config
    n_epochs = 4
    string_test = "A test config"
    dict_test = {"config_val": 2, "config_string": "config string"}
    list_test = [0, "one", "2"]
    config = {
        "epochs": n_epochs,
        "stringTest": string_test,
        "dictTest": dict_test,
        "listTest": list_test,
    }
    # create a file to save
    filepath = "./test with_special-characters.txt"
    with open(filepath, "w") as f:
        f.write("test")

    with wandb.init(reinit=True, config=config) as run:
        run_id = run.id
        entity = run.entity
        logged = True
        try:
            for i in range(1, 11):
                run.log({"loss": 1.0 / i}, step=i)
            log_dict = {"val1": 1.0, "val2": 2}
            run.log({"dict": log_dict}, step=i + 1)
        except Exception:
            logged = False
            failed_test_strings.append(
                "Failed to log values to run. Contact W&B for support."
            )

        try:
            run.log({"HT%3ML ": wandb.Html('<a href="https://mysite">Link</a>')})
        except Exception:
            failed_test_strings.append(
                "Failed to log to media. Contact W&B for support."
            )

        wandb.save(filepath)

    public_api = wandb.Api()
    try:
        prev_run = public_api.run("{}/{}/{}".format(entity, PROJECT_NAME, run_id))
    except Exception:
        failed_test_strings.append(
            "Failed to access run through API. Contact W&B for support."
        )
        print_results(failed_test_strings, False)
        return
    for key, value in prev_run.config.items():
        if config[key] != value:
            failed_test_strings.append(
                "Read config values don't match run config. Contact W&B for support."
            )
            break
    if logged and (
        prev_run.history_keys["keys"]["loss"]["previousValue"] != 0.1
        or prev_run.history_keys["lastStep"] != 11
        or prev_run.history_keys["keys"]["dict.val1"]["previousValue"] != 1.0
        or prev_run.history_keys["keys"]["dict.val2"]["previousValue"] != 2
    ):
        failed_test_strings.append(
            "History metrics don't match logged values. Check database encoding."
        )

    if logged and prev_run.summary["loss"] != 1.0 / 10:
        failed_test_strings.append(
            "Read summary values don't match expected value. Check database encoding, or contact W&B for support."
        )
    try:
        read_file = prev_run.file(filepath).download(replace=True)
    except Exception:
        with wandb.init(
            project=PROJECT_NAME, config={"test": "test direct saving"}
        ) as run:
            saved, status_code, _ = try_manual_save(api, filepath, run.id, run.entity)
            if saved:
                failed_test_strings.append(
                    "Unable to download file. Check SQS configuration, topic configuration and bucket permissions."
                )
            else:
                failed_test_strings.append(
                    "Unable to save file with status code: {}. Check SQS configuration and bucket permissions.".format(
                        status_code
                    )
                )

            print_results(failed_test_strings, False)
        return
    contents = read_file.read()
    if contents != "test":
        failed_test_strings.append(
            "Contents of downloaded file do not match uploaded contents. Contact W&B for support."
        )
    print_results(failed_test_strings, False)
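
The verification above boils down to a log-then-read-back round trip: log values (including a wandb.Html panel) in a run, then fetch the finished run through the public API and compare. A condensed sketch of that round trip, assuming you are logged in; the project name and config are placeholders:

import wandb

with wandb.init(project="verify-demo", config={"epochs": 4}) as run:
    run.log({"loss": 0.5})
    run.log({"report": wandb.Html('<a href="https://mysite">Link</a>')})
    run_path = "{}/{}/{}".format(run.entity, run.project, run.id)

api = wandb.Api()
finished_run = api.run(run_path)
assert finished_run.config["epochs"] == 4
assert finished_run.summary["loss"] == 0.5
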
Example #4
        wandbLog["AEstVar"] = AEstVar_t / train_c
        wandbLog["angAfterNorm"] = angAfterNorm_t / train_c

        if config.segmentation:
            customHtml = str(paramN) + "<br>"
            # customHtml += ', '.join('{:0.5f}'.format(i) for i in (scheduler.get_lr()))+"<br>"
            customHtml += niceL2S(
                'torch.sigmoid(LG_UpdS[0:3,0:seedNumber])',
                torch.sigmoid(LG_UpdS[0:3, 0:config.sequence_seed])) + "<br>"
            customHtml += niceL2S(
                'torch.sigmoid(LG_UpdT[:,1:seedNumber])',
                torch.sigmoid(LG_UpdT[:, 1:config.sequence_seed])) + "<br>"
            customHtml += niceL2S(
                'torch.sigmoid(LG_TeS[:,1:seedNumber])',
                torch.sigmoid(LG_TeS[:, 1:config.sequence_seed])) + "<br>"
            wandbLog["html"] = wandb.Html(customHtml)

        print(
            '...done! ', Fore.LIGHTYELLOW_EX +
            "Took: {:.2f}".format(time.time() - start_time) + " Sec" +
            Fore.RESET)

    if t % config.validate_every > 0 and not inference_phase and not is_sweep:
        print(Fore.LIGHTYELLOW_EX, " ==> Skip validation",
              (config.validate_every - (t % config.validate_every)), "!...",
              Fore.RESET)
        wandblog(wandbLog, commit=True)
        t = t + 1
        continue

    start_time = time.time()
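
wandb.Html also accepts a raw HTML string, which the fragment above uses to dump tensor values into a readable panel (via the project-specific niceL2S helper). A minimal sketch of the same idea without that helper, assuming torch and wandb are installed and you are logged in; the key and tensor are placeholders:

import torch
import wandb

run = wandb.init(project="html-string-demo")  # placeholder project name
weights = torch.rand(3, 4)
rows = "<br>".join(
    ", ".join("{:0.5f}".format(v) for v in row)
    for row in torch.sigmoid(weights).tolist())
run.log({"html": wandb.Html("<b>sigmoid(weights)</b><br>" + rows)})
run.finish()
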
def main():
    wandb.init()

    histogram_small_literal = wandb.Histogram(np_histogram=([1, 2, 4],
                                                            [3, 10, 20, 0]))
    histogram_large_random = wandb.Histogram(
        numpy.random.randint(255, size=(1000)))
    numpy_array = numpy.random.rand(1000)
    torch_tensor = torch.rand(1000, 1000)
    data_frame = pandas.DataFrame(data=numpy.random.rand(1000),
                                  columns=['col'])
    tensorflow_variable_single = tensorflow.Variable(543.01,
                                                     tensorflow.float32)
    tensorflow_variable_multi = tensorflow.Variable([[2, 3], [7, 11]],
                                                    tensorflow.int32)
    plot_scatter = go.Figure(  # plotly
        data=go.Scatter(x=[0, 1, 2]),
        layout=go.Layout(title=go.layout.Title(text="A Bar Chart")))

    image_data = numpy.zeros((28, 28))
    image_cool = wandb.Image(image_data, caption="Cool zeros")
    image_nice = wandb.Image(image_data, caption="Nice zeros")
    image_random = wandb.Image(numpy.random.randint(255, size=(28, 28, 3)))
    image_pil = wandb.Image(PIL.Image.new("L", (28, 28)))
    plt.plot([1, 2, 3, 4])
    plt.ylabel('some interesting numbers')
    image_matplotlib_plot = wandb.Image(plt)
    matplotlib_plot = plt

    audio_data = numpy.random.uniform(-1, 1, 44100)
    sample_rate = 44100
    caption1 = "This is what a dog sounds like"
    caption2 = "This is what a chicken sounds like"
    # test with all captions
    audio1 = wandb.Audio(audio_data, sample_rate=sample_rate, caption=caption1)
    audio2 = wandb.Audio(audio_data, sample_rate=sample_rate, caption=caption2)
    # test with no captions
    audio3 = wandb.Audio(audio_data, sample_rate=sample_rate)
    audio4 = wandb.Audio(audio_data, sample_rate=sample_rate)
    # test with some captions
    audio5 = wandb.Audio(audio_data, sample_rate=sample_rate)
    audio6 = wandb.Audio(audio_data, sample_rate=sample_rate, caption=caption2)

    html = wandb.Html("<html><body><h1>Hello</h1></body></html>")

    table_default_columns = wandb.Table()
    table_default_columns.add_data("Some awesome text", "Positive", "Negative")

    table_custom_columns = wandb.Table(["Foo", "Bar"])
    table_custom_columns.add_data("So", "Cool")
    table_custom_columns.add_data("&", "Rad")

    #plot_figure = matplotlib.pyplot.plt.figure()
    #c1 = matplotlib.pyplot.plt.Circle((0.2, 0.5), 0.2, color='r')
    #ax = matplotlib.pyplot.plt.gca()
    #ax.add_patch(c1)
    #matplotlib.pyplot.plt.axis('scaled')

    # pytorch model graph
    alex = models.AlexNet()
    graph = wandb.wandb_torch.TorchGraph.hook_torch(alex)
    alex.forward(dummy_torch_tensor((2, 3, 224, 224)))

    with tensorflow.Session().as_default() as sess:
        sess.run(tensorflow.global_variables_initializer())

        wandb.run.summary.update({
            'histogram-small-literal-summary': histogram_small_literal,
            'histogram-large-random-summary': histogram_large_random,
            'numpy-array-summary': numpy_array,
            'torch-tensor-summary': torch_tensor,
            # bare dataframes in summary and history removed in 0.10.21
            # 'data-frame-summary': data_frame,
            'image-cool-summary': image_cool,
            'image-nice-summary': image_nice,
            'image-random-summary': image_random,
            'image-pil-summary': image_pil,
            'image-plot-summary': image_matplotlib_plot,
            'image-list-summary': [image_cool, image_nice, image_random, image_pil],
            # Doesn't work, because something has happened to the MPL object (MPL may
            # be doing magical scope stuff). If you log it right after creating it,
            # it works fine.
            # 'matplotlib-plot': matplotlib_plot,
            'audio1-summary': audio1,
            'audio2-summary': audio2,
            'audio3-summary': audio3,
            'audio4-summary': audio4,
            'audio5-summary': audio5,
            'audio6-summary': audio6,
            'audio-list-summary': [audio1, audio2, audio3, audio4, audio5, audio6],
            'html-summary': html,
            'table-default-columns-summary': table_default_columns,
            'table-custom-columns-summary': table_custom_columns,
            'plot-scatter-summary': plot_scatter,
            # 'plot_figure': plot_figure,
            'tensorflow-variable-single-summary': tensorflow_variable_single,
            'tensorflow-variable-multi-summary': tensorflow_variable_multi,
            'graph-summary': graph,
        })

        for i in range(10):
            wandb.run.history.add({
                'string': 'string',
                'histogram-small-literal': histogram_small_literal,
                'histogram-large-random': histogram_large_random,
                'numpy-array': numpy_array,
                'torch-tensor': torch_tensor,
                # 'data-frame': data_frame,  # not supported yet
                'image-cool': image_cool,
                'image-nice': image_nice,
                'image-random': image_random,
                'image-pil': image_pil,
                'image-plot': image_matplotlib_plot,
                'image-list': [image_cool, image_nice, image_random, image_pil],
                # 'matplotlib-plot': matplotlib_plot,
                'audio1': audio1,
                'audio2': audio2,
                'audio3': audio3,
                'audio4': audio4,
                'audio5': audio5,
                'audio6': audio6,
                'audio-list': [audio1, audio2, audio3, audio4, audio5, audio6],
                'html': html,
                'table-default-columns': table_default_columns,
                'table-custom-columns': table_custom_columns,
                'plot-scatter': plot_scatter,
                # 'plot_figure': plot_figure,
                'tensorflow-variable-single': tensorflow_variable_single,
                'tensorflow-variable-multi': tensorflow_variable_multi,
                # 'graph': graph,
            })

        wandb.run.summary.update({
            'histogram-small-literal-summary': histogram_small_literal,
            'histogram-large-random-summary': histogram_large_random,
            'numpy-array-summary': numpy_array,
            'torch-tensor-summary': torch_tensor,
            # bare dataframes in summary and history removed in 0.10.21
            # 'data-frame-summary': data_frame,
            'image-cool-summary': image_cool,
            'image-nice-summary': image_nice,
            'image-random-summary': image_random,
            'image-pil-summary': image_pil,
            'image-plot-summary': image_matplotlib_plot,
            'image-list-summary': [image_cool, image_nice, image_random, image_pil],
            # 'matplotlib-plot': matplotlib_plot,
            'audio1-summary': audio1,
            'audio2-summary': audio2,
            'audio3-summary': audio3,
            'audio4-summary': audio4,
            'audio5-summary': audio5,
            'audio6-summary': audio6,
            'audio-list-summary': [audio1, audio2, audio3, audio4, audio5, audio6],
            'html-summary': html,
            'table-default-columns-summary': table_default_columns,
            'table-custom-columns-summary': table_custom_columns,
            'plot-scatter-summary': plot_scatter,
            # 'plot_figure': plot_figure,
            'tensorflow-variable-single-summary': tensorflow_variable_single,
            'tensorflow-variable-multi-summary': tensorflow_variable_multi,
            'graph-summary': graph,
        })
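
main() above targets wandb's older wandb.run.history.add / wandb.run.summary.update interface. With the current SDK the same wandb.Html object can go into history via run.log and into the summary directly; a minimal sketch, assuming a live logged-in session (project and key names are placeholders):

import wandb

run = wandb.init(project="media-demo")  # placeholder project name
html = wandb.Html("<html><body><h1>Hello</h1></body></html>")
run.log({"html": html})              # goes to history
run.summary["html-summary"] = html   # goes to the run summary
run.finish()
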
def _make_html():
    return wandb.Html(
        "<p>Embedded</p><iframe src='https://wandb.ai'></iframe>")