Example #1
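# NOTE: these snippets assume module-level setup not shown here, e.g. imports of
# os, numpy as np, tabular as tb, PIL.Image, sklearn's linear_model and svm, the
# project helpers bbox, fg, get_image, get_features, get_feature_points,
# get_positions, transform_average and get_random_empty_bbox, plus the
# xfields/yfields column-name lists used to read bounding-box coordinates.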
def check_stamps(metadatafile,imagedir,train_frames,outdir):
    os.mkdir(outdir)
    metadata = tb.tabarray(SVfile=metadatafile)
    #get labels for training objects

    train_labels_inds = []
    for cn,fr in train_frames:
        inds = ((metadata['Frame'] == fr) & (metadata['clip_num'] == cn) & (metadata['ObjectType'] != 'DCR')).nonzero()[0]
        #ind = inds[t['object_number']]
        train_labels_inds.extend(inds)
    train_labels = metadata[train_labels_inds]
    #get stamps for training objects

    train_points = []
    train_points_labels = []
    sizes = []
    num_train = 0
    for label in train_labels:
        lbl = label['clip_num'] + '_' + str(label['Frame']) + '.jpg'
        print(label)
        framefile = os.path.join(imagedir,lbl)
        im = get_image(framefile)

        box = bbox.BoundingBox(xs = [label[xf] for xf in xfields],
                               ys = [label[yf] for yf in yfields])
        stamp = bbox.stamp(im,box,stamp_shape=(200,200))[0]
        if stamp is not None:
            img = Image.fromarray(stamp)
            img.save(os.path.join(outdir,str(num_train) + '.jpg'))
            num_train += 1
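A minimal call sketch for check_stamps; the paths and (clip_num, Frame) pairs below are placeholders, and the frame images are assumed to be named '<clip_num>_<Frame>.jpg' as in the code above:

# hypothetical invocation: cut 200x200 stamps for the labeled objects in two frames
# and write them to stamps_out/0.jpg, stamps_out/1.jpg, ...
check_stamps(metadatafile='metadata.csv',
             imagedir='frames/',
             train_frames=[('clip01', 3), ('clip01', 17)],
             outdir='stamps_out')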
Example #2
def detect_evaluate_spots(model,imagedir,train_frames,metadatafile,clf,stamp_shape,pset,num_empties=None):
    filters = fg.get_hierarchical_filterbanks(model['config']['model']['layers'])
    
    metadata = tb.tabarray(SVfile=metadatafile)
    #get labels for training objects
    transform_config = {'transform_name':'translation','percentile':pset}
    train_labels_inds = []
    for cn,fr in train_frames:
        inds = ((metadata['Frame'] == fr) & (metadata['clip_num'] == cn) & (metadata['ObjectType'] != 'DCR')).nonzero()[0]
        #ind = inds[t['object_number']]
        train_labels_inds.extend(inds)
    train_labels = metadata[train_labels_inds]
    #get stamps for training objects

    train_points = []
    train_points_labels = []
    sizes = []
    num_train = 0
    for label in train_labels:
        lbl = label['clip_num'] + '_' + str(label['Frame']) + '.jpg'
        print(label)
        framefile = os.path.join(imagedir,lbl)
        im = get_image(framefile)
        box = bbox.BoundingBox(xs = [label[xf] for xf in xfields],
                               ys = [label[yf] for yf in yfields])
        stamp = bbox.stamp(im,box,stamp_shape=stamp_shape)[0]
        if stamp is not None:
            sizes.append(stamp.shape)
            print(stamp.shape)
            try:
                features = get_features(model,filters,stamp)
            except Exception:
                print(label,'is bad')
            else:
                num_train += 1
                features = {'0':features}
                feature_stats = transform_average(features,transform_config,model)
                train_points.append(feature_stats)
                train_points_labels.append(1)

    # default the number of empty (negative) examples to the number of positives
    num_empties = num_empties if num_empties is not None else num_train
    for ind in range(num_empties):
        print('empty',ind)
        im = get_random_empty_bbox(train_labels,sizes,imagedir)
        try:
            features = get_features(model,filters,im)
        except Exception:
            print('empty', ind, 'is bad')
        else:
            features = {'0':features}
            feature_stats = transform_average(features,transform_config,model)
            train_points.append(feature_stats)
            train_points_labels.append(0)

    train_points = np.array(train_points)
    train_points_labels = np.array(train_points_labels)

    prediction = clf.predict(train_points)
    return prediction,train_points_labels
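One hedged way to consume the returned (prediction, labels) pair, for example to score a classifier produced by detect_train in example #3 below (test_frames, pset and stamp_shape are placeholder values):

prediction, labels = detect_evaluate_spots(model, imagedir, test_frames,
                                           metadatafile, clf,
                                           stamp_shape=(200, 200), pset=50)
# prediction holds clf's outputs, labels the ground truth (1 = object stamp, 0 = empty)
accuracy = (prediction == labels).mean()
print('accuracy:', accuracy)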
Example #3
def detect_train(model,metadatafile,imagedir,train_frames,num_empties=None,regress=True,points=True,pset=None,stamp_shape=0):
    if points is False:
        assert regress is False, 'regress must be False if points is False'
        assert pset is not None, 'pset must not be None if points is False'
        assert stamp_shape != 0, 'stamp_shape must be provided if points is False'
        transform_config = {'transform_name':'translation','percentile':pset}

    filters = fg.get_hierarchical_filterbanks(model['config']['model']['layers'])
    
    metadata = tb.tabarray(SVfile=metadatafile)
    #get labels for training objects

    train_labels_inds = []
    for cn,fr in train_frames:
        inds = ((metadata['Frame'] == fr) & (metadata['clip_num'] == cn) & (metadata['ObjectType'] != 'DCR')).nonzero()[0]
        #ind = inds[t['object_number']]
        train_labels_inds.extend(inds)
    train_labels = metadata[train_labels_inds]
    #get stamps for training objects

    train_points = []
    train_points_labels = []
    sizes = []
    num_train = 0
    for label in train_labels:
        lbl = label['clip_num'] + '_' + str(label['Frame']) + '.jpg'
        print(label)
        framefile = os.path.join(imagedir,lbl)
        im = get_image(framefile)
        box = bbox.BoundingBox(xs = [label[xf] for xf in xfields],
                               ys = [label[yf] for yf in yfields])
        stamp = bbox.stamp(im,box,stamp_shape=stamp_shape)[0]
        if stamp is not None:
            sizes.append(stamp.shape)
            print(stamp.shape)
            try:
                features = get_features(model,filters,stamp)
            except Exception:
                print(label,'is bad')
            else:
                num_train += 1
                if points:
                    feature_points,sh = get_feature_points(features)
                    train_points.extend(feature_points)
                    vecs = get_positions((box.width,box.height),features,regress=regress)
                    train_points_labels.extend(vecs)
                else:
                    features = {'0':features}
                    feature_stats = transform_average(features,transform_config,model)
                    train_points.append(feature_stats)
                    train_points_labels.append(1)

    # default the number of empty (negative) examples to the number of positives
    num_empties = num_empties if num_empties is not None else num_train
    for ind in range(num_empties):
        print('empty',ind)
        im = get_random_empty_bbox(train_labels,sizes,imagedir)
        try:
            features = get_features(model,filters,im)
        except Exception:
            print('empty', ind, 'is bad')
        else:
            if points:
                feature_points,sh = get_feature_points(features)
                train_points.extend(feature_points)
                if regress:
                    vecs = [(-100,-100) for ind in range(len(feature_points))]
                else:
                    vecs = [0 for ind in range(len(feature_points))]
                train_points_labels.extend(vecs)
            else:
                features = {'0':features}
                feature_stats = transform_average(features,transform_config,model)
                train_points.append(feature_stats)
                train_points_labels.append(0)

    train_points = np.array(train_points)
    train_points_labels = np.array(train_points_labels)

    # fit a linear regressor (regress=True) or a linear SVM classifier (regress=False)
    if regress:
        clf = linear_model.LinearRegression()
    else:
        clf = svm.LinearSVC()
    clf.fit(train_points,train_points_labels)

    return clf
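A sketch of how detect_train could be chained with detect_evaluate_spots from example #2, using the whole-stamp mode (points=False); all argument values here are placeholders:

clf = detect_train(model, metadatafile, imagedir, train_frames,
                   points=False, regress=False,
                   pset=50, stamp_shape=(200, 200))
prediction, labels = detect_evaluate_spots(model, imagedir, test_frames,
                                           metadatafile, clf,
                                           stamp_shape=(200, 200), pset=50)

With points=False and regress=False the function trains an svm.LinearSVC on per-stamp feature statistics, which appears to be the mode detect_evaluate_spots expects.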