def displayPreview(self, img, v, i):
        """Show (or refresh) a live preview window for the measurement image.

        Lazily creates one figure with an image axes plus a narrow colorbar
        axes on the first call and reuses both on subsequent calls.  Closing
        the window clears the cached handles so the next call rebuilds them.

        Args:
            img: array-like image passed to ``imshow``.
            v: voltage value shown in the title (volts, per the format string).
            i: current value shown in the title (amps, per the format string).
        """
        def handle_close(evt, self):
            # Invalidate the cached figure/axes so the next preview call
            # recreates them instead of drawing into a dead window.
            self.__previewFigure = None
            self.__previewAxes = [None, None]

        if self.__previewFigure is None:  #preview window is not created yet, lets make it
            plt.ioff()
            self.__previewFigure, self.__previewAxes[0] = plt.subplots()
            # Carve a 5%-wide strip off the right of the image axes to host
            # the colorbar, so it stays aligned with the image on redraws.
            divider = make_axes_locatable(self.__previewAxes[0])
            self.__previewAxes[1] = divider.append_axes('right',
                                                        size='5%',
                                                        pad=0.05)
            self.__previewFigure.canvas.mpl_connect(
                'close_event', lambda x: handle_close(x, self)
            )  # if preview figure is closed, lets clear the figure/axes handles so the next preview properly recreates the handles
            plt.ion()
            plt.show()

        for ax in self.__previewAxes:  #clear the axes
            ax.clear()
        img_handle = self.__previewAxes[0].imshow(img)
        # Draw the colorbar into the dedicated strip rather than letting
        # matplotlib steal space from the image axes on every refresh.
        self.__previewFigure.colorbar(img_handle, cax=self.__previewAxes[1])
        self.__previewAxes[0].set_title('{0} V, {1} A, {2} Laser'.format(
            v, i, self.laserpower))
        self.__previewFigure.canvas.draw()
        self.__previewFigure.canvas.flush_events()
        time.sleep(
            1e-4)  #pause allows plot to update during series of measurements
# ---- Esempio n. 2 ----
# 0
# Select the matplotlib backend: interactive when a display is available,
# headless Agg otherwise.  The backend must be chosen before pyplot is imported.
DISPLAY = False
if DISPLAY:
    # NOTE(review): this binds the top-level ``matplotlib`` package to the
    # name ``plt``, not ``matplotlib.pyplot`` — pyplot-style ``plt.*`` calls
    # would fail in this branch.  Probably intended
    # ``import matplotlib.pyplot as plt``; confirm before enabling DISPLAY.
    import matplotlib as plt
else:
    import matplotlib
    matplotlib.use('Agg')  # non-GUI backend: figures are rendered off-screen
    import matplotlib.pyplot as plt
    plt.ioff()  # no interactive windows; figures are saved, not shown
from nets import big_net, weights_init
import kuramoto as km
import kura_visual as kv
import numpy as np
import torch
import loss_func_ex
from make_data_new import polyomino_scenes, mask_pro
import matplotlib.pyplot as plt
import os
import ipdb

# Global plot style for every figure this script produces.
# matplotlib >= 3.6 removed the 'seaborn-*' style aliases (renamed to
# 'seaborn-v0_8-*'), so fall back to the new name when the old one is gone.
try:
    plt.style.use('seaborn-darkgrid')
except OSError:
    plt.style.use('seaborn-v0_8-darkgrid')

# Experiment parameters

# Saving directories
# Resolve the output directory under the user's home and ensure it exists.
# ``exist_ok=True`` replaces the original check-then-create pair, which was
# racy (the directory could appear between the exists() test and makedirs()).
home_dir = os.path.expanduser('~/')
save_dir = os.path.join(home_dir, 'oscillators')
os.makedirs(save_dir, exist_ok=True)

# Model name used to label this experiment's saved artifacts.
model_name = '64_oscis_optim'
# ---- Esempio n. 3 ----
# 0
def main():
    """Live image-classification loop over an RGB camera plus a thermal feed.

    Captures frame pairs via ``read_cameras``, crops/normalizes both, runs
    each through VGG16's convolutional features, concatenates the two feature
    maps, and feeds a sliding window of the last 8 combined feature vectors
    to an LSTM that predicts one of 7 pancake states.  Both images and the
    predicted class are shown in a live matplotlib figure.

    NOTE(review): the ``while True`` loop has no ``break``, so the cleanup
    code after it (``plt.ioff()`` … ``cv2.destroyAllWindows()``) is
    unreachable; the loop only ends when the process is killed.
    """

    print("Waiting for camera Initialization...")
    # CAP_DSHOW selects the DirectShow capture backend (Windows-specific).
    cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    if cam.isOpened():
        print("Camera Initialized!")

    # Initialize NN forward prop.
    # Pretrained VGG16 is used only as a fixed feature extractor below
    # (``vgg.features``); it is never fine-tuned here.
    vgg = models.vgg16(pretrained=True)
    data_transform = transforms.Compose([
        transforms.CenterCrop(154),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # Pools the concatenated VGG feature maps down to a fixed 7x7 grid so the
    # flattened vector length matches the LSTM's expected input size.
    avgpool = nn.AdaptiveAvgPool2d((7, 7))
    lstm_model = lstmModel(1024 * 7 * 7, 2048, 7)
    lstm_model_path = 'state.pt'
    # Second positional argument of torch.load is map_location: force the
    # checkpoint onto the CPU regardless of the device it was saved from.
    lstm_model.load_state_dict(torch.load(lstm_model_path,
                                          torch.device('cpu')))
    classes = [
        'No Pancake', 'Raw', 'Ready to Flip', 'Bottom Up', 'Ready to Remove',
        'Burnt', 'Obstruction'
    ]

    # ite = 0
    # One throwaway capture to calibrate the crop windows for both streams.
    rgb_img, tmp_dat = read_cameras(cam)
    # ite += 1

    rgb_dim, rgb_minmax = sample_img_dim(rgb_img)
    tmp_dim, tmp_minmax = sample_img_dim(tmp_dat)
    plt.close('all')
    plt.pause(0.1)

    # Sliding window of the most recent combined feature vectors; the LSTM
    # is only invoked once 8 frames have accumulated.
    feature_queue = []

    # Display Image
    fig, axs = plt.subplots(1, 2)

    axs[0].set_title("RGB Image")
    # Seed both panels with black 154x154 placeholders; the image artists
    # are kept and updated in-place via set_data inside the loop.
    ax0 = axs[0].imshow(np.zeros((154, 154, 3)))

    axs[1].set_title("Temperature Image")
    ax1 = axs[1].imshow(np.zeros((154, 154, 3)))
    fig.suptitle("Undefined")

    plt.ion()

    while True:
        # Capture Image
        rgb_img, tmp_dat = read_cameras(cam)
        # ite += 1

        # Crop Image
        rgb_cropped = crop_and_resize_image(rgb_img, rgb_minmax, rgb_dim, RES)
        #print(rgb_cropped.shape)
        tmp_cropped = crop_and_resize_data(tmp_dat, tmp_minmax)
        #print(tmp_cropped.shape)
        # print(rgb_cropped)
        # print(tmp_cropped)

        # Feed Forward
        # Transform images
        # The thermal frame is single-channel; convert('RGB') triples it so
        # both streams go through the same 3-channel transform pipeline.
        rgb_cropped_show = Image.fromarray(rgb_cropped.astype(np.uint8))
        print(rgb_cropped_show)
        tmp_cropped_show = Image.fromarray(tmp_cropped.astype(
            np.uint8)).convert('RGB')
        print(tmp_cropped_show)
        rgb_cropped = data_transform(rgb_cropped_show)
        tmp_cropped = data_transform(tmp_cropped_show)

        # Add 4th dimension
        # unsqueeze(0) adds the batch dimension expected by vgg.features.
        rgb_cropped = torch.Tensor(rgb_cropped).unsqueeze(0)
        tmp_cropped = torch.Tensor(tmp_cropped).unsqueeze(0)

        rgb_feature = vgg.features(rgb_cropped)
        tmp_feature = vgg.features(tmp_cropped)

        # Concatenate features
        # dim=1 stacks along channels: 512 + 512 = 1024 channels, matching
        # the 1024 * 7 * 7 input size given to lstmModel above.
        rgb_feature_tensor = torch.from_numpy(rgb_feature.detach().numpy())
        tmp_feature_tensor = torch.from_numpy(tmp_feature.detach().numpy())
        rgb_tmp_combined_tensor = torch.cat(
            (rgb_feature_tensor, tmp_feature_tensor), dim=1)
        rgb_tmp_combined_tensor = avgpool(rgb_tmp_combined_tensor)
        rgb_tmp_combined_tensor = torch.flatten(rgb_tmp_combined_tensor)

        # Maintain Feature Queue
        feature_queue.append(rgb_tmp_combined_tensor.detach().numpy())
        if len(feature_queue) < 8:
            # Not enough history for the LSTM yet; keep capturing frames.
            continue

        # ndmin=3 shapes the window as (1, 8, features) — batch of one
        # sequence of length 8 — before handing it to the LSTM.
        feature_input_tensor = torch.Tensor(np.array(feature_queue, ndmin=3))
        outputs = lstm_model(feature_input_tensor)
        # print(outputs)
        predicted_class = outputs.detach().numpy().argmax()
        print(classes[predicted_class])
        # Drop the oldest frame so the window stays at 8 entries.
        feature_queue.pop(0)

        #axs[0].set_title("RGB Image")
        ax0.set_data(rgb_cropped_show)

        #axs[1].set_title("Temperature Image")
        ax1.set_data(tmp_cropped_show)
        # plt.show()
        fig.suptitle(classes[predicted_class])
        # pause gives the GUI event loop time to repaint the figure.
        plt.pause(0.01)

    # Unreachable: the loop above never breaks (see docstring note).
    plt.ioff()
    plt.show()
    cam.release()
    cv2.destroyAllWindows()