Example #1
import os

# 'player' and 'decoder' are project-local modules assumed by this snippet;
# the exact import paths depend on the repository layout.
import decoder
import player

export_basename = 'tmp/' + os.path.splitext(os.path.basename(__file__))[0]
p = player.Player(export_basename)

p.click()
for i in range(3):
    p.move_down()
p.click()
for i in range(5):
    p.move_down()
p.click()
p.endstop_z()
p.wait_ms(300)
p.endstop_z()
p.wait_ms(100)
p.endstop_x()
p.wait_ms(300)
p.endstop_x()
p.wait_ms(100)
p.endstop_y()
p.wait_ms(300)
p.endstop_y()
p.wait_ms(500)
p.transfer_off()

events = p.run()

timeline = decoder.decode(export_basename + '.bin')

p.close()
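Example #2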
def main(argv):
    if (len(sys.argv) != 3):
        print('Usage ./makeTrainCoco.py [root_dir] [config.yml]')
        sys.exit(1)
    # Load data
    root_dir = argv[1] + '/'  # in case we forgot the trailing '/'
    print('Opening file ' + root_dir + argv[2])
    with open(root_dir + argv[2], 'r') as configfile:
        config = yaml.safe_load(configfile)

    image_dir = root_dir + config['data_dir']
    train_dir = root_dir + config['data_dir']
    weights_dir = root_dir + config['weights_dir']
    your_weights = weights_dir + config['generic_weights']
    annotations_file = train_dir + config['untrained_annotations_fname']
    train_files_regex = config['generic_train_files_regex']

    train_images = glob.glob(image_dir + train_files_regex)

    max_l = 100
    min_l = 10

    im_size = 864  # size of training images for yolo

    ##################################################
    #im_size=416 #size of training images for yolo
    yolov3 = get_yolo_model(im_size, im_size, trainable=False)
    yolov3.load_weights(your_weights, by_name=True)

    ########################################
    im_num = 1
    all_imgs = []
    for imagename in train_images:
        im = cv2.imread(imagename)
        print('processing image ' + imagename + ', ' + str(im_num) + ' of ' +
              str(len(train_images)) + '...')
        height, width = im.shape[:2]
        im_num += 1

        n_count = 0
        for x in np.arange(0, width - im_size, im_size):
            for y in np.arange(0, height - im_size, im_size):
                img_data = {
                    'object': []
                }  # dictionary to store this tile's metadata and detected objects
                head, tail = os.path.split(imagename)
                noext, ext = os.path.splitext(tail)
                save_name = train_dir + '/TR_' + noext + '-' + str(
                    n_count) + '.png'
                box_name = train_dir + '/bbox/' + noext + '-' + str(
                    n_count) + '.png'
                img = im[y:y + im_size, x:x + im_size, :]
                cv2.imwrite(save_name, img)
                img_data['filename'] = save_name
                img_data['width'] = im_size
                img_data['height'] = im_size
                n_count += 1
                # use the yolov3 model to predict 80 classes on COCO

                # preprocess the image: BGR -> RGB, scale to [0, 1]
                image_h, image_w, _ = img.shape
                new_image = img[:, :, ::-1] / 255.
                new_image = np.expand_dims(new_image, 0)

                # run the prediction
                yolos = yolov3.predict(new_image)

                boxes = decode(yolos, obj_thresh=0.005, nms_thresh=0.5)
                for b in boxes:
                    xmin = int(b[0])
                    xmax = int(b[2])
                    ymin = int(b[1])
                    ymax = int(b[3])
                    obj = {}

                    obj['name'] = 'aoi'

                    if xmin < 0: continue
                    if ymin < 0: continue
                    if xmax > im_size: continue
                    if ymax > im_size: continue
                    if (xmax - xmin) < min_l: continue
                    if (xmax - xmin) > max_l: continue
                    if (ymax - ymin) < min_l: continue
                    if (ymax - ymin) > max_l: continue

                    obj['xmin'] = xmin
                    obj['ymin'] = ymin
                    obj['xmax'] = xmax
                    obj['ymax'] = ymax
                    img_data['object'] += [obj]
                    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0),
                                  2)

                cv2.imwrite(box_name, img)
                all_imgs += [img_data]

    #print(all_imgs)
    print('Saving data to ' + annotations_file)
    with open(annotations_file, 'w') as handle:
        yaml.dump(all_imgs, handle)

    print('Finished! :o)')
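The script above reads every path and filename from the config.yml passed on the command line. Below is a minimal, hedged sketch of how such a config could be generated; the key names come from the snippet, but the values are placeholders, not taken from the original project.

# Sketch: write a config.yml with the keys makeTrainCoco.py expects.
# Key names come from the snippet above; the values are placeholders.
import yaml

example_config = {
    'data_dir': 'images/',
    'weights_dir': 'weights/',
    'generic_weights': 'yolo-v3-coco.h5',
    'untrained_annotations_fname': 'annotations_untrained.yml',
    'generic_train_files_regex': '*.png',
}

with open('config.yml', 'w') as handle:
    yaml.dump(example_config, handle, default_flow_style=False)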
Example #3
def main(argv):
    if (len(sys.argv) != 3):
        print('Usage ./makeTrain.py [root_dir] [config.yml]')
        sys.exit(1)
    # Load data
    root_dir = argv[1] + '/'  # in case we forgot the trailing '/'
    print('Opening file ' + root_dir + argv[2])
    with open(root_dir + argv[2], 'r') as configfile:
        config = yaml.safe_load(configfile)

    image_dir = root_dir + config['data_dir']
    train_dir = root_dir + config['data_dir']
    your_weights = root_dir + config['weights_dir'] + config['specific_weights']
    trained_annotations_fname = train_dir + config['trained_annotations_fname']
    train_files_regex = config['specific_train_files_regex']

    train_images = glob.glob(train_dir + train_files_regex)
    shuffle(train_images)

    max_l = 100
    min_l = 10

    im_size = 864  # size of training images for yolo

    ##################################################
    #im_size=416 #size of training images for yolo
    yolov3 = get_yolo_model(im_size, im_size, num_class=1, trainable=False)
    yolov3.load_weights(your_weights)

    ########################################
    im_num = 1
    all_imgs = []
    for imagename in train_images:
        img = cv2.imread(imagename)
        print('processing image ' + imagename + ', ' + str(im_num) + ' of ' +
              str(len(train_images)) + '...')
        im_num += 1

        img_data = {
            'object': []
        }  # dictionary to store this image's metadata and detected objects
        head, tail = os.path.split(imagename)
        noext, ext = os.path.splitext(tail)
        box_name = train_dir + '/bbox/' + tail
        img_data['filename'] = tail
        img_data['width'] = im_size
        img_data['height'] = im_size

        # use the trained yolov3 model to predict

        # preprocess the image
        image_h, image_w, _ = img.shape
        new_image = img[:, :, ::-1] / 255.
        new_image = np.expand_dims(new_image, 0)

        # run the prediction
        yolos = yolov3.predict(new_image)

        boxes = decode(yolos, obj_thresh=0.2, nms_thresh=0.3)
        for b in boxes:
            xmin = int(b[0])
            xmax = int(b[2])
            ymin = int(b[1])
            ymax = int(b[3])
            obj = {}

            obj['name'] = 'aoi'

            if xmin < 0: continue
            if ymin < 0: continue
            if xmax > im_size: continue
            if ymax > im_size: continue
            if (xmax - xmin) < min_l: continue
            if (xmax - xmin) > max_l: continue
            if (ymax - ymin) < min_l: continue
            if (ymax - ymin) > max_l: continue

            obj['xmin'] = xmin
            obj['ymin'] = ymin
            obj['xmax'] = xmax
            obj['ymax'] = ymax
            img_data['object'] += [obj]
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)

        cv2.imwrite(box_name, img)
        all_imgs += [img_data]

    #print(all_imgs)
    with open(trained_annotations_fname, 'w') as handle:
        yaml.dump(all_imgs, handle)
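Both training-data scripts serialise the per-image dictionaries with yaml.dump, so the resulting annotation files can be loaded back for inspection or hand-checking. A small sketch, assuming an annotations file produced by the snippets above ('annotations.yml' is a placeholder path; use the filename set in your config):

# Sketch: load a dumped annotations file and print a per-image summary.
import yaml

with open('annotations.yml', 'r') as handle:
    all_imgs = yaml.safe_load(handle)

for img_data in all_imgs:
    print(img_data['filename'], len(img_data['object']), 'boxes')
    for obj in img_data['object']:
        print('   ', obj['name'], obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'])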
Example #4
            #cv2.imwrite(save_name, img)
            img_data['filename'] = tail
            img_data['width'] = im_size
            img_data['height'] = im_size
            n_count+=1
            # use the yolov3 model to predict 80 classes on COCO

            # preprocess the image
            image_h, image_w, _ = img.shape
            new_image = img[:,:,::-1]/255.
            new_image = np.expand_dims(new_image, 0)

            # run the prediction
            yolos = yolov3.predict(new_image)

            boxes = decode(yolos, obj_thresh=0.40, nms_thresh=0.3)
            for b in boxes:
                xmin=int(b[0])
                xmax=int(b[2])
                ymin=int(b[1])
                ymax=int(b[3])

                obj = {}

                obj['name'] = 'aoi'

 #               xmin = point['xcoord'] - x - sz_2
 #               xmax = point['xcoord'] - x + sz_2
 #               ymin = point['ycoord'] - y - sz_2
 #               ymax = point['ycoord'] - y + sz_2
Example #5
def main(argv):
    if len(sys.argv) != 3:
        print('Usage ./postTrainTest.py [data_dir] [config.yml]')
        sys.exit(1)
    # Load data
    data_dir = argv[1] + '/'  # in case we forgot '/'
    print('Opening file ' + argv[2])
    with open(argv[2], 'r') as configfile:
        config = yaml.safe_load(configfile)

    image_dir = data_dir
    train_dir = data_dir
    weights_dir = data_dir + config['weights_dir']

    #Training type dependent
    trained_weights = weights_dir + config['trained_weights']

    #based on get_yolo_model defaults and previous makeTrain.py files
    num_class=config['specific']['num_class']
    obj_thresh=config['specific']['obj_thresh']
    nms_thresh=config['specific']['nms_thresh']

    list_of_train_files = config['checked_annotations_fname']

    annotations_file = train_dir + config['untrained_annotations_fname']
    with open(annotations_file, 'r') as fp:
        all_imgs = yaml.safe_load(fp)

    max_l=config['MAX_L'] #maximal object size in pixels
    min_l=config['MIN_L']
    im_size=config['IMAGE_H'] #size of training images for yolo

    ##################################################
    print("Loading YOLO models")
    yolov3 = get_yolo_model(im_size,im_size,num_class,trainable=False)
    yolov3.load_weights(trained_weights,by_name=True) #TODO is by_name necessary here?
    print("YOLO models loaded, my dear.")
    ########################################

    #read in all images from checked annotations (GROUND TRUTH)
    for i in range(len(all_imgs)):
        basename = os.path.basename(all_imgs[i]['filename'])

        #remove extension from basename:
        name_seed_split = basename.split('.')[:-1]
        name_seed = '.'.join(name_seed_split)
        fname_gt = image_dir + "/groundtruths/" + name_seed + ".txt"
        fname_pred = image_dir + "/predictions/" + name_seed + ".txt"

        img_data = {'object':[]}
        img_data['filename'] = basename
        img_data['width'] = all_imgs[i]['width']
        img_data['height'] = all_imgs[i]['height']

        #Reading ground truth
        boxes_gt=[]
        for obj in all_imgs[i]['object']:
            boxes_gt.append([obj['xmin'],obj['ymin'],obj['xmax'],obj['ymax']])
        sys.stdout.write('GT objects:')
        sys.stdout.write(str(len(boxes_gt)))
        sys.stdout.flush()
        #do box processing
        img = cv2.imread(image_dir + basename)

        with open(fname_gt, 'w') as file_gt: #left top right bottom
            for b in boxes_gt:
                obj = {}
                if ((b[2]-b[0])*(b[3]-b[1]))<10:
                    continue
                obj['name'] = 'aoi'
                obj['xmin'] = int(b[0])
                obj['ymin'] = int(b[1])
                obj['xmax'] = int(b[2])
                obj['ymax'] = int(b[3])
                img_data['object'] += [obj]
                file_gt.write(obj['name'] + " " )
                file_gt.write(str(obj['xmin']) + " " )
                file_gt.write(str(obj['ymin']) + " " )
                file_gt.write(str(obj['xmax']) + " " )
                file_gt.write(str(obj['ymax']))
                file_gt.write('\n')

        # preprocess the image
        image_h, image_w, _ = img.shape
        new_image = img[:,:,::-1]/255.
        new_image = np.expand_dims(new_image, 0)

        # run the prediction
        sys.stdout.write('Yolo predicting...')
        sys.stdout.flush()
        yolos = yolov3.predict(new_image)
        sys.stdout.write('decoding...')
        sys.stdout.flush()
        boxes_predict = decode(yolos, obj_thresh, nms_thresh)
        sys.stdout.write('done! # of boxes_predict: ')
        sys.stdout.write(str(len(boxes_predict)))
        sys.stdout.write('\n')
        sys.stdout.flush()

        with open(fname_pred, 'w') as file_pred: #left top right bottom
            for b in boxes_predict:
                xmin=int(b[0])
                xmax=int(b[2])
                ymin=int(b[1])
                ymax=int(b[3])
                confidence=float(b[4])
                objpred = {}

                objpred['name'] = 'aoi'

                if xmin<0: continue
                if ymin<0: continue
                if xmax>im_size: continue
                if ymax>im_size: continue
                if (xmax-xmin)<min_l: continue
                if (xmax-xmin)>max_l: continue
                if (ymax-ymin)<min_l: continue
                if (ymax-ymin)>max_l: continue

                objpred['xmin'] = xmin
                objpred['ymin'] = ymin
                objpred['xmax'] = xmax
                objpred['ymax'] = ymax
                objpred['confidence'] = confidence
                file_pred.write(objpred['name'] + " " )
                file_pred.write(str(objpred['confidence']) + " " )
                file_pred.write(str(objpred['xmin']) + " " )
                file_pred.write(str(objpred['ymin']) + " " )
                file_pred.write(str(objpred['xmax']) + " " )
                file_pred.write(str(objpred['ymax']))
                file_pred.write('\n')


        #precision = tp / (tp + fp)
        # for box_gt in boxes_gt:
        #     for box_predict in boxes_predict:
        #         iou_val = bbox_iou(box_predict,box_gt)
        #         print(iou_val)

    #count predictions which reach a threshold of, let's say, 0.5
    # if we change the detection threshold I think we'll get a ROC curve - that'd be cute.

    print('Finished! :o)')
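The commented-out block near the end of this script hints at matching predictions against ground truth with bbox_iou to compute precision. A minimal sketch of that idea is shown below as a standalone helper; the project's own bbox_iou may differ, so treat this as an illustration only.

# Sketch: IoU between two boxes given as [xmin, ymin, xmax, ymax],
# plus a naive precision count at a fixed IoU threshold.
def box_iou(box_a, box_b):
    ix1 = max(box_a[0], box_b[0])
    iy1 = max(box_a[1], box_b[1])
    ix2 = min(box_a[2], box_b[2])
    iy2 = min(box_a[3], box_b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

def precision_at(boxes_predict, boxes_gt, iou_thresh=0.5):
    # boxes_predict entries carry a confidence in b[4], so compare only b[:4]
    tp = sum(1 for bp in boxes_predict
             if any(box_iou(bp[:4], bg) >= iou_thresh for bg in boxes_gt))
    return tp / len(boxes_predict) if boxes_predict else 0.0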
Example #6
def main(argv):
    if len(sys.argv) != 3:
        print('Usage ./prepTrain.py [data_dir] [config.yml]')
        sys.exit(1)
    # Load data
    data_dir = argv[1] + '/'  # in case we forgot '/'
    print('Opening file ' + argv[2])
    with open(argv[2], 'r') as configfile:
        config = yaml.safe_load(configfile)

    #TODO: since this is the first file to use, maybe add a check if all directories exist?

    image_dir = data_dir
    train_dir = data_dir
    weights_dir = data_dir + config['weights_dir']

    #Training type dependent
    training_type = config['training_type']
    print("Training type is " + training_type)
    print(config[training_type])
    your_weights = weights_dir + config[training_type]['weights']


    #check md5 of a weights file if available
    md5check(config[training_type]['weights_md5'],your_weights)


    train_files_regex = config[training_type]['train_files_regex']

    #based on get_yolo_model defaults and previous makeTrain.py files
    num_class=config[training_type]['num_class']
    obj_thresh=config[training_type]['obj_thresh']
    nms_thresh=config[training_type]['nms_thresh']

    train_images =  glob.glob( image_dir + train_files_regex )
    annotations_file = train_dir + config['untrained_annotations_fname']

    max_l=config['MAX_L'] #maximal object size in pixels
    min_l=config['MIN_L']

    im_width=config['IMAGE_W'] #size of training images for yolo
    im_height=config['IMAGE_H']

    ##################################################
    print("Loading YOLO models")
    yolov3 = get_yolo_model(im_width,im_height,num_class,trainable=False)
    yolov3.load_weights(your_weights,by_name=True) #TODO is by_name necessary here?

    # Creating another model to provide visualisation and/or extraction of high level features

    yolov3_feats = get_yolo_model_feats(im_width,im_height,num_class,trainable=False)
    yolov3_feats.load_weights(your_weights,by_name=True) #TODO is by_name necessary here?


    print("YOLO models loaded, my dear.")
    ########################################
    im_num=1
    all_imgs = []
    for imagename in train_images:
        im = cv2.imread(imagename)
        print('processing image ' + imagename + ', ' + str(im_num) + ' of ' + str(len(train_images))  + '...')
        im_yolo = makeYoloCompatible(im)
        height, width = im_yolo.shape[:2]
        im_num+=1
        n_count=0

        for x in np.arange(0,1+width-im_width,im_width):  #'1+' added to allow the case when the image is exactly the size of one window
            for y in np.arange(0,1+height-im_height,im_height):
                img_data = {'object':[]}     # dictionary to store this tile's metadata and detected objects
                head, tail = os.path.split(imagename)
                noext, ext = os.path.splitext(tail)
                save_name = train_dir + '/TR_' + noext + '-' + str(n_count) + '.png'
                box_name = train_dir + '/bbox/' + noext + '-' + str(n_count) + '.png'
                img = im[y:y+im_height,x:x+im_width,:]
                cv2.imwrite(save_name, img)
                img_data['filename'] = save_name
                img_data['width'] = im_width
                img_data['height'] = im_height

                n_count+=1
                # use the yolov3 model to predict 80 classes on COCO

                # preprocess the image
                image_h, image_w, _ = img.shape
                new_image = img[:,:,::-1]/255.
                new_image = np.expand_dims(new_image, 0)

                # run the prediction
                sys.stdout.write('Yolo predicting...')
                sys.stdout.flush()
                yolos = yolov3.predict(new_image)

                # yolo_feats = yolov3_feats.predict(new_image)
                # print(type(yolo_feats))
                # print(type(yolo_feats[1]))
                # print(yolo_feats[1].shape)
                # print(yolo_feats[1].dtype)
                # fileObject = open("feats.pickle",'wb')
                # pickle.dump(yolo_feats[1],fileObject)
                # fileObject.close()
                # print("pickedleeee")
                # cv2.imshow("heatmap",yolo_feats[1][:,:,1])
                # k = cv2.waitKey(0)

                sys.stdout.write('Decoding...')
                sys.stdout.flush()
                boxes = decode(yolos, obj_thresh, nms_thresh)
                sys.stdout.write('Done! # of boxes: ')
                sys.stdout.write(str(len(boxes)))
                sys.stdout.flush()
                for b in boxes:
                    xmin=int(b[0])
                    xmax=int(b[2])
                    ymin=int(b[1])
                    ymax=int(b[3])
                    obj = {}

                    obj['name'] = 'aoi'

                    if xmin<0: continue
                    if ymin<0: continue
                    if xmax>im_width: continue
                    if ymax>im_height: continue
                    if (xmax-xmin)<min_l: continue
                    if (xmax-xmin)>max_l: continue
                    if (ymax-ymin)<min_l: continue
                    if (ymax-ymin)>max_l: continue

                    obj['xmin'] = xmin
                    obj['ymin'] = ymin
                    obj['xmax'] = xmax
                    obj['ymax'] = ymax
                    img_data['object'] += [obj]
                    cv2.rectangle(img, (xmin,ymin), (xmax,ymax), (0,255,0), 2)

                cv2.imwrite(box_name, img)
                all_imgs += [img_data]


    #print(all_imgs)
    print('Saving data to ' + annotations_file)
    with open(annotations_file, 'w') as handle:
        yaml.dump(all_imgs, handle)

    print('Finished! :o)')
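All of the snippets on this page are fragments of longer scripts: the imports and the entry point are not shown. The sketch below lists the boilerplate the YOLO scripts appear to assume; the module providing get_yolo_model, get_yolo_model_feats, decode, makeYoloCompatible and md5check is project-specific, so that import is left as a commented placeholder.

# Sketch of the imports and entry point the YOLO snippets above seem to assume.
import glob
import os
import sys
from random import shuffle

import cv2
import numpy as np
import yaml

# Placeholder: import the project's own helpers, e.g.
# from yolo_detector import (get_yolo_model, get_yolo_model_feats, decode,
#                            makeYoloCompatible, md5check)

if __name__ == '__main__':
    main(sys.argv)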