def initialize_dyros_client_2(self):
        host = SocketType.dyros_2.value["host"]
        port = SocketType.dyros_2.value["port"]
        sock = su.initialize_client(host, port)
        self.logger.info("==> Connected to Dyros 2 server on {}:{}".format(
            host, port))

        return sock
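The SocketType enum used above is defined elsewhere in the project; a minimal hypothetical sketch of the shape it needs for this method to work (the host and port values below are placeholders, not the project's real settings):

from enum import Enum


class SocketType(Enum):
    # hypothetical stand-in; each member's value is a dict holding the
    # server address that su.initialize_client() expects
    dyros_2 = {"host": "localhost", "port": 7777}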
Example n. 2
from easy_tcp_python2_3 import socket_utils as su


def main():
    # connect to the hand-pose server running on this machine
    sock = su.initialize_client('localhost', 7777)

    # HandClient comes from the surrounding project; its import is omitted here
    client = HandClient()

    while True:
        # receive a frame, estimate the hand pose, and send the result back
        frame = su.recvall_pickle(sock)
        pose = client.get_pose(frame)
        su.sendall_pickle(sock, pose)
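For context, a rough sketch of the sending side this loop expects. It assumes easy_tcp_python2_3's socket_utils also exposes an initialize_server(host, port) helper that blocks until a client connects; that name is an assumption, not something shown in the snippets here.

from easy_tcp_python2_3 import socket_utils as su


def serve_hand_frames(frames):
    # assumption: socket_utils provides a server-side counterpart to
    # initialize_client; adjust to the library's actual API if it differs
    sock = su.initialize_server('localhost', 7777)
    for frame in frames:
        su.sendall_pickle(sock, frame)    # push one frame to the client
        pose = su.recvall_pickle(sock)    # read back the estimated hand pose
        print("received pose:", pose)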
Example n. 3
def main(args):

    sock = su.initialize_client('localhost', 7777)

    parser = make_parser()
    args = parser.parse_args(args)
    cfg = grasp_estimator.joint_config(
        args.vae_checkpoint_folder,
        args.evaluator_checkpoint_folder,
    )
    cfg['threshold'] = args.threshold
    cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
    cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
    estimator = grasp_estimator.GraspEstimator(cfg)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    sess = tf.Session()
    estimator.build_network()
    estimator.load_weights(sess)

    while True:
        data = su.recvall_pickle(sock)

        print(data.keys())
        for k in data.keys():
            print(k, np.shape(data[k]))

        depth = data['depth']
        image = data['image']
        K = data['intrinsics_matrix']
        # Removing points that are farther than 1 meter or missing depth 
        # values.
        depth[depth == 0] = np.nan
        depth[depth > 1] = np.nan
        pc, selection = backproject(depth, K, return_finite_depth=True, return_selection=True)
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        pc_colors = pc_colors[selection, ::-1]

        # down sampling
        idx = np.random.choice(pc_colors.shape[0], 100000, replace=False)
        pc = pc[idx, :]
        pc_colors = pc_colors[idx, :]

        # Smoothed pc comes from averaging the depth for 10 frames and removing
        # the pixels with jittery depth between those 10 frames.
        object_pc = data['smoothed_object_pc']
        latents = estimator.sample_latents()
        generated_grasps, generated_scores, _ = estimator.predict_grasps(
            sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )
        print("====>", generated_grasps, generated_scores)
        mlab.figure(bgcolor=(1,1,1))
        if len(generated_grasps) != 0:
            draw_scene(
                pc,
                pc_color=pc_colors,
                grasps=generated_grasps,
                grasp_scores=generated_scores,
            )
            print('close the window to continue to next object . . .')
        mlab.show()
        su.sendall_pickle(sock, [generated_grasps, generated_scores])
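backproject and draw_scene above come from the grasp-estimation project and are not shown here. As a reference, a rough sketch of the standard pinhole back-projection the call above implies, with the finite-depth/selection behaviour inferred from how the results are used rather than taken from the original implementation:

import numpy as np


def backproject_sketch(depth, K):
    # depth: (H, W) metric depth with NaN for invalid pixels; K: 3x3 intrinsics
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth.reshape(-1)
    selection = np.isfinite(z)            # keep only pixels with valid depth
    u = u.reshape(-1)[selection]
    v = v.reshape(-1)[selection]
    z = z[selection]
    x = (u - K[0, 2]) * z / K[0, 0]       # X = (u - cx) * Z / fx
    y = (v - K[1, 2]) * z / K[1, 1]       # Y = (v - cy) * Z / fy
    pc = np.stack([x, y, z], axis=1)      # (M, 3) point cloud
    return pc, selection                  # selection indexes the flattened (H*W) image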


Example n. 4
if __name__ == "__main__":

    yaml_path = os.path.join(
        Path(__file__).parent.parent, "params", "azure_centermask_SNU.yaml")
    with open(yaml_path) as f:
        params = yaml.load(f, Loader=yaml.FullLoader)
    cfg = setup_cfg(params)
    os.environ["CUDA_VISIBLE_DEVICES"] = params["is_gpu_id"]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("==> Loading CenterMask on", device, params["is_gpu_id"])
    demo = VisualizationDemo(cfg)
    sock = su.initialize_client(params["tcp_ip"],
                                params["centermask_tcp_port"])

    while True:
        img = su.recvall_image(sock)
        predictions, vis_output = demo.run_on_image(img)
        pred_masks = predictions["instances"].pred_masks.cpu().detach().numpy()         # (N, H, W)
        pred_boxes = predictions["instances"].pred_boxes.tensor.cpu().detach().numpy()  # (N, 4) as (x1, y1, x2, y2)
        pred_scores = predictions["instances"].scores.cpu().detach().numpy()            # (N,) confidence scores
        pred_classes = predictions["instances"].pred_classes.cpu().detach().numpy()     # (N,) class ids in [0, num_categories)
        vis_img = cv2.resize(vis_output.get_image()[:, :, ::-1],
                             (params["width"], params["height"]))
        su.sendall_image(sock, vis_img)

Example n. 5
        # `f` is an open YAML config file from this snippet's setup (truncated
        # in the source); it is loaded the same way as in the previous example
        params = yaml.load(f, Loader=yaml.FullLoader)

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = params["side_gpu_id"]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = timm.create_model(params["side_model_name"], pretrained=False)
    model.to(device)
    model.eval()
    checkpoint = torch.load(params["side_weight_path"])
    model.load_state_dict(checkpoint)
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    sock = su.initialize_client(params["tcp_ip"], params["side_tcp_port"])

    while True:
        img = su.recvall_image(sock)
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
        img = transform(img).unsqueeze(0)
        output = model(img.to(device))
        topk = (1, )
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        pred = bool(pred.t()[0].item())  # True if the top-1 class index is nonzero
        su.sendall_pickle(sock, pred)

    sock.close()
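For a two-class model the topk indexing above reduces to a single argmax; a small equivalent sketch (the function name is illustrative), wrapped in torch.no_grad() so no autograd graph is built during inference:

import torch


def predict_flag(model, img_tensor, device):
    # equivalent of the topk-based check above for a binary classifier:
    # returns True when the top-1 class index is nonzero
    with torch.no_grad():
        output = model(img_tensor.to(device))     # (1, num_classes) logits
        return bool(output.argmax(dim=1).item())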
Example n. 6
import torch
import torchvision.models as models
import torchvision.transforms as transforms

from easy_tcp_python2_3 import socket_utils as su
from imagenet_stubs.imagenet_2012_labels import label_to_name

if __name__ == "__main__":

    sock = su.initialize_client('localhost', 7777)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = models.detection.maskrcnn_resnet50_fpn(pretrained=True).to(device)
    model.eval()
    print("Using mask R-CNN with ResNet-50 backbone", device)
    transforms_composed = transforms.Compose([transforms.ToTensor()])

    while True:

        recv_image = su.recvall_image(sock)
        x = transforms_composed(recv_image).unsqueeze(0)
        outputs = model(x.to(device))[0]

        probs = outputs['scores'].cpu().detach().numpy()
        boxes = outputs["boxes"].cpu().detach().numpy()
        labels = outputs["labels"].cpu().detach().numpy()
        masks = outputs["masks"].cpu().detach().numpy()

        keep = probs > 0.5
        probs = probs[keep]
        labels = labels[keep]
        boxes = boxes[keep]
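        # --- sketch of a possible continuation (the source snippet is cut off
        # here, so this is an assumption): filter the soft masks with the same
        # keep mask, binarize the (N, 1, H, W) probabilities at 0.5, and send
        # the result back as in the other examples.
        masks = masks[keep]
        masks = masks[:, 0] > 0.5
        su.sendall_pickle(sock, {"boxes": boxes, "labels": labels,
                                 "scores": probs, "masks": masks})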
Example n. 7
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = params["bracket_gpu_id"]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = timm.create_model(params["bracket_model_name"], pretrained=False)
    model.to(device)
    model.eval()
    checkpoint = torch.load(params["bracket_weight_path"])
    model.load_state_dict(checkpoint)
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    sock = su.initialize_client(params["tcp_ip"], params["bracket_tcp_port"])

    while True:
        img = su.recvall_image(sock) 
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
        img = transform(img).unsqueeze(0)
        output = model(img.to(device))

        topk = (1, )
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        pred = bool(pred.t()[0].item())  # True if the top-1 class index is nonzero
        pred = not pred                  # invert the flag before sending it back
        su.sendall_pickle(sock, pred)

    sock.close()