Example #1
def detect_image(model, device, classes, window_name='Detections'):
    model.to(device)
    model.eval()
    dataloader = DataLoader(ImageFolder(OPT.image_folder, img_size=OPT.img_size),
                            batch_size=OPT.batch_size, shuffle=False, num_workers=OPT.n_cpu)
    imgs = []           # Stores image paths
    img_detections = [] # Stores detections for each image index
    print('\nPerforming object detection:')
    with torch.no_grad():
        for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
            # Configure input
            input_imgs = input_imgs.to(device)
            # Get detections
            detections, inference_time = detect(model, input_imgs)
            print('\t+ Batch %d, Inference Time: %s' % (batch_i, inference_time))
            # Save image and detections
            imgs.extend(img_paths)
            img_detections.extend(detections)
    # Bounding-box colors
    colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in range(NUM_CLASSES)]
    print('\nSaving images:')
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
        print("(%d) Image: '%s'" % (img_i, path))
        img = cv2.imread(path)
        img = tools.resize(img, DISPLAY_WIDTH, None)
        height, width = img.shape[:2]
        # The amount of padding that was added
        if height <= width:
            pad_x = 0
            pad_y = (width - height) * OPT.img_size // width
        else:
            pad_x = (height - width) * OPT.img_size // height
            pad_y = 0
        # Image height and width after padding is removed
        unpad_h = OPT.img_size - pad_y
        unpad_w = OPT.img_size - pad_x
        # Draw bounding boxes and labels of detections
        if detections is not None:
            unique_labels = detections[:, -1].cpu().unique()
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                print('\t+ Label: %s, Conf: %.5f' % (classes[int(cls_pred)], cls_conf.item()))
                # Rescale coordinates to original dimensions
                box_h = (y2 - y1) * height / unpad_h
                box_w = (x2 - x1) * width / unpad_w
                y1 = (y1 - pad_y // 2) * height / unpad_h
                x1 = (x1 - pad_x // 2) * width / unpad_w
                # Cast the rescaled coordinates to ints for OpenCV drawing calls
                x1, y1, box_w, box_h = int(x1), int(y1), int(box_w), int(box_h)
                color = colors[int(np.where(unique_labels == int(cls_pred))[0])]
                # Draw the bounding box and a filled label background with the class name
                cv2.rectangle(img, (x1, y1), (x1 + box_w, y1 + box_h), color, thickness=3)
                cv2.rectangle(img, (x1 - 2, y1 - 20), (x1 + 80, y1), color, thickness=-1)
                cv2.putText(img, classes[int(cls_pred)], (x1 + 3, y1 - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
        # Show the image and save it with its detections drawn
        cv2.imshow(window_name, img)
        # cv2.waitKey(0)
        # quit()
        cv2.waitKey(200)
        cv2.imwrite('output/%d.png' % (img_i), img)
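
Example #1 calls a detect() helper that is not shown on this page. Below is a minimal sketch of what such a helper might look like; the non_max_suppression import and the OPT.conf_thres / OPT.nms_thres options are assumptions, not code taken from the example's project.

import time
import datetime

import torch
from utils.utils import non_max_suppression  # assumed to live in the same YOLOv3-style utils package


def detect(model, input_imgs):
    # Hypothetical helper (an assumption, not the original project's code):
    # run a forward pass, filter the raw predictions with non-maximum
    # suppression, and report how long the whole step took.
    start = time.time()
    with torch.no_grad():
        outputs = model(input_imgs)
        # OPT.conf_thres and OPT.nms_thres are assumed to exist on the same OPT object used above
        outputs = non_max_suppression(outputs, OPT.conf_thres, OPT.nms_thres)
    inference_time = datetime.timedelta(seconds=time.time() - start)
    return outputs, inference_time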
Example #2
    def test_ImageFolder(self):
        img_folder = "13-1-yolo-pytorch/samples"
        dataloader = DataLoader(
            ImageFolder(img_folder, img_size=416),
            batch_size=1,
            shuffle=False,
            num_workers=0,
        )
        for batch_i, (img_path, input_imgs) in enumerate(dataloader):
            print(img_path, '->', input_imgs[0, 0, 100, 50:])
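
Every example on this page builds a DataLoader over ImageFolder from utils.datasets without showing the dataset itself. The following is a minimal sketch of an equivalent dataset, assuming it lists every file in a folder, pads each image to a square, and resizes it to img_size; the real implementation's padding and resizing details may differ.

import glob

import torch.nn.functional as F
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


class ImageFolder(Dataset):
    # Sketch only: yields (image_path, square image tensor of shape 3 x img_size x img_size)

    def __init__(self, folder_path, img_size=416):
        self.files = sorted(glob.glob('%s/*.*' % folder_path))
        self.img_size = img_size

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        img_path = self.files[index % len(self.files)]
        img = transforms.ToTensor()(Image.open(img_path).convert('RGB'))
        # Zero-pad the shorter side so the image becomes square
        _, h, w = img.shape
        diff = abs(h - w)
        pad = (0, 0, diff // 2, diff - diff // 2) if h <= w else (diff // 2, diff - diff // 2, 0, 0)
        img = F.pad(img, pad, value=0)
        # Resize the square image to the network input resolution
        img = F.interpolate(img.unsqueeze(0), size=self.img_size, mode='nearest').squeeze(0)
        return img_path, img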
Example #3
    os.makedirs("output", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)

    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path))

    model.eval()

    dataloader = DataLoader(
        ImageFolder(opt.image_folder, img_size=opt.img_size),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )

    classes = load_classes(opt.class_path)  # Extracts class labels from file

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        input_imgs = input_imgs.requires_grad_(False).to(device)
Example #4
    mnv2 = MobileNetV2(n_class=1000)
    state_dict = torch.load(
        '/Users/tczhong/mobilenet_v2.pth.tar',
        map_location='cpu')  # add map_location='cpu' if no gpu
    mnv2.load_state_dict(state_dict)
    mnv2.classifier = Identity()
    if cuda:
        mnv2.cuda()

    #vgg16 = models.vgg16(pretrained=True)
    #vgg16.classifier = nn.Sequential(*[vgg16.classifier[i] for i in range(4)])
    #if cuda:
    #    vgg16.cuda()

    dirName = "/Users/tczhong/image/"
    dataloader = DataLoader(ImageFolder(dirName, img_size=opt.img_size),
                            batch_size=opt.batch_size,
                            shuffle=False,
                            num_workers=opt.n_cpu)

    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    prev_feature = None
    cos = nn.CosineSimilarity(dim=1)
    prev_time = time.time()
    total_time = datetime.timedelta(seconds=0)
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        input_imgs = Variable(input_imgs.type(Tensor))
        print(input_imgs.size())
        cur_feature = mnv2(input_imgs)
        #cur_feature = vgg16(input_imgs)
        current_time = time.time()
Example #5
import argparse
import pathlib

import caffe.proto.caffe_pb2 as caffe_pb2
from torch.utils.data import DataLoader
from utils.datasets import ImageFolder, ListDataset

# Default input prototxt path
HOME_DIR = str(pathlib.Path(__file__).parent.absolute().parent.absolute())
_PROTOTXT_PATH = '%s/SSD_caffe_model/ASU_model/deploy_6.prototxt' % HOME_DIR

# Default input caffemodel path
_CAFFEMODEL_PATH = '%s/SSD_caffe_model/ASU_model/SSD300_6_iter_120000.caffemodel' % HOME_DIR

# Default output pytorch weights path
_PTHMODEL_PATH = '%s/SSD_pytorch_model/SSD300_6_iter_120000.pth' % HOME_DIR

torch_folder_dataloader = DataLoader(ImageFolder('data/samples', img_size=300),
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1,
                                     pin_memory=True)

torch_list_dataloader = DataLoader(ListDataset('data/samples/listfile.txt',
                                               img_size=300,
                                               normalized_labels=False),
                                   batch_size=1,
                                   shuffle=False,
                                   num_workers=1,
                                   pin_memory=True)
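# Note (assumption, not part of the original file): the list file above would
# typically contain one image path per line, e.g.
#   data/samples/dog.jpg
#   data/samples/street.jpg
# (file names are placeholders). Datasets of this kind usually locate each
# image's annotation by swapping an images/ directory for labels/ and the
# image extension for .txt; normalized_labels=False suggests the label boxes
# are given in absolute pixel coordinates rather than values normalized to [0, 1].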


def parse_caffemodel(caffemodel):