Example #1
from Embedding import *
from Annotation import JHMDBAnnotator as JA, Evaluator as EVAL
import numpy as np
import random
import time
from configs import getConfigs

conf = getConfigs(43)
snapshot_path = conf.solver['snapshot_prefix']
db = conf.db_settings[conf.db]
action = 'pour'
video = db['video_name'][action][0]
level = db['level']
dataset_path = db['pickle_path'].format(action_name='pour',
                                        video_name=video,
                                        level=level)
#dataset_path = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/dataset/{name}'
net_path = conf.model['model_prototxt_path']
#net_path = '{name}'
annotation_path = db['annotation_path'].format(action_name=action,
                                               video_name=video)
#annotation_path = '/cs/vml3/mkhodaba/cvpr16/dataset/{name}'
#snapshot_name = '_iter_870000.caffemodel'
snapshot_name = '_iter_25000.caffemodel'

#model_name = 'model.prototxt'
test_model_path = '/cs/vml2/mkhodaba/cvpr16/expriments/8/test.prototxt'
snapshot_path = conf.solver['snapshot_prefix']  #'/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/snapshot/vml_gpu/256bins/'

print net_path
import caffe
from numpy import zeros
import numpy as np

from configs import getConfigs

conf = getConfigs(8)


model_prototxt_path = conf.model['model_prototxt_path']
solver_prototxt_path = conf.solver['_solver_prototxt_path']


#root = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/model/'

caffe.set_mode_gpu()
#caffe.set_device(2)

test_interval = conf.solver['test_interval'] #10000
niter = conf.solver['max_iter'] #500000
train_interval = conf.solver['_train_interval'] #1000
termination_threshold = conf.solver['_termination_threshold']
net = caffe.Net(model_prototxt_path, caffe.TRAIN)

#solver = caffe.SGDSolver(root+'solver.prototxt')
solver = caffe.SGDSolver(solver_prototxt_path)
# losses will also be stored in the log
#train_loss = zeros(niter)
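
The example stops right after constructing the solver. A minimal sketch of the training loop it sets up might look like the following; the 'loss' blob name is an assumption (use whatever top blob the model prototxt actually defines):

# Sketch only: run SGD and log the loss every train_interval iterations.
train_loss = np.array([])
for it in range(niter):
    solver.step(1)  # one forward/backward pass and parameter update
    loss = float(solver.net.blobs['loss'].data)  # 'loss' blob name assumed
    train_loss = np.append(train_loss, loss)
    if it % train_interval == 0:
        print 'iter {0}: loss = {1}'.format(it, loss)
    if loss < termination_threshold:  # mirrors the _termination_threshold setting
        break
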
Example #3
    return reps

def computeDistanceMatrix(representations):
    # Negative Gram matrix: a larger inner product (more similar) maps to a smaller "distance".
    return -1 * (representations.dot(representations.T))

def getBaselineRepresentations(segment, k, feature_type):
    # Rebind the unpickled object's class so the current MySegmentation methods apply to it.
    segment.__class__ = MySegmentation
    features = MySegmentation.getFeatures(segment, k, feature_type=feature_type)
    return features
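
Since computeDistanceMatrix returns the negative Gram matrix, smaller entries mean more similar embeddings. A quick usage sketch (the random data below is illustrative only):

# Illustrative only: rank neighbors with the negative-dot-product distance above.
reps = np.random.rand(10, 64)  # 10 hypothetical embeddings of dimension 64
reps /= np.linalg.norm(reps, axis=1, keepdims=True)  # unit norm, so dot product ~ cosine
D = computeDistanceMatrix(reps)
nearest = D[0].argsort()[1:4]  # the 3 closest embeddings to the first one (index 0 is itself)
print 'nearest to 0:', nearest
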

if __name__ == '__main__':
    print "START"
    caffe.set_mode_gpu()
    #caffe.set_device(2)
    if len(sys.argv) == 1:
        conf = getConfigs(-1)
    else:
        conf = getConfigs(int(sys.argv[1]))

    logger = Logger(LogType.FILE, conf.solver['_solver_log_path'])
    threshold = 0.6

    db = conf.db_settings
    model_prototxt_path = conf.model['model_prototxt_path']
    solver_prototxt_path = conf.solver['_solver_prototxt_path']
    test_interval = conf.solver['test_interval'] #10000
    niter = conf.solver['max_iter'] #max(100000, conf.solver['max_iter']) #500000
    train_interval = conf.solver['_train_interval'] #1000
    termination_threshold = conf.solver['_termination_threshold']
    print "net"
    net = caffe.Net(model_prototxt_path, caffe.TRAIN)
def create_dbs():
    configs = getConfigs()

    frame_format = configs.frame_format
    seg_path = configs.seg_path
    orig_path = configs.orig_path
    first_output = configs.first_output
    output_path = configs.output_path
    dataset_path = configs.dataset_path
    annotation_path = configs.annotation_path
    n_neg = configs.number_of_negatives
    print 'n_neg \t= \t' + str(n_neg)
    feature_name = '256bin'
    level = 2
    segmentors = []
    vid_num = 2
    frames_per_video = 31
    if True:  # flip to False to load previously pickled segmentors instead
        for dd in range(vid_num):
            d = dd+1
            print 'b{0}'.format(d)
            annotator = JA(annotation_path.format(name='b'+str(d)))
            segmentor = MySegmentation(orig_path.format(d)+frame_format, seg_path.format(d,level)+frame_format, annotator, negative_neighbors=n_neg)
            for i in range(1, frames_per_video):
                print "processing frame {i}".format(i=i)
                segmentor.processNewFrame()
            segmentor.doneProcessing()
            segmentors.append(segmentor)
            print "Total number of supervoxels: {0}".format(len(segmentor.supervoxels))
            print

        try:
            mkdirs(dataset_path)
        except:
            pass  # ignore failures, e.g. the directory already exists
        print 'Pickling ...'
        t = time.time()
        for i in range(vid_num):
            pickle.dump(segmentors[i], open(dataset_path.format(name='segment_{0}.p'.format(i+1)), 'w'))
            print '{0}-th done. time elapsed: {1}'.format(i+1, time.time()-t)
            t = time.time()

        #TODO create database
    else:
        for i in range(vid_num):
            segmentors.append(pickle.load(open(dataset_path.format(name='segment_{0}.p'.format(i+1)), 'r')))


    database = DB(dataset_path.format(name='videos{v}_feature{f}_lvl{l}.h5'.format(
                            v='_'.join(map(str, range(1, vid_num + 1))),  # include all vid_num videos in the name
                            f=feature_name,
                            l=level)))

    print 'Collecting features ...'
    neighbor_num = 6
    keys = ['target', 'negative'] + [ 'neighbor{0}'.format(i) for i in range(neighbor_num)]
    features = segmentors[0].getFeatures(neighbor_num)
    print 'shape features', features['target'].shape
    feats = [features]
    print 'video 1 done!'
    for i in range(1, len(segmentors)):
        tmp = segmentors[i].getFeatures(neighbor_num)
        #feats.append(tmp)
        for key in keys:
            features[key] = np.append(features[key], tmp[key], axis=0)
        print 'video {0} done!'.format(i+1)
    #print data
    #database_path = '
    print 'saving to database ...'
    for name, data in features.iteritems():
        database.save(data, name)
    #database.save(dataset)
    database.close()


    print 'done!'
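
The DB object is written once here and read back during training. Assuming it stores plain HDF5 datasets (the DB class itself is not shown, so reading it with h5py is an assumption), the saved features can be sanity-checked like this:

import h5py

# Name pattern produced by create_dbs() for vid_num=2, '256bin' features, level 2.
path = 'videos1_2_feature256bin_lvl2.h5'
with h5py.File(path, 'r') as f:
    for name in f:  # 'target', 'negative', 'neighbor0', ..., 'neighbor5'
        print name, f[name].shape
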
Example #5
def create_dbs():
    configs = getConfigs()

    frame_format = configs.frame_format
    seg_path = configs.seg_path
    orig_path = configs.orig_path
    first_output = configs.first_output
    output_path = configs.output_path
    dataset_path = configs.dataset_path
    annotation_path = configs.annotation_path
    feature_name = '256bin'
    level = 2
    segmentors = []
    vid_num = 2
    frames_per_video = 31
    if True:  # flip to False to load previously pickled segmentors instead
        for dd in range(vid_num):
            d = dd + 1
            print 'b{0}'.format(d)
            annotator = JA(annotation_path.format(name='b' + str(d)))
            segmentor = MySegmentation(
                orig_path.format(d) + frame_format,
                seg_path.format(d, level) + frame_format, annotator)
            for i in range(1, frames_per_video):
                print "processing frame {i}".format(i=i)
                segmentor.processNewFrame()
            segmentor.doneProcessing()
            segmentors.append(segmentor)
            print "Total number of supervoxels: {0}".format(
                len(segmentor.supervoxels))
            print

        try:
            mkdirs(dataset_path)
        except:
            pass  # ignore failures, e.g. the directory already exists
        print 'Pickling ...'
        t = time.time()
        for i in range(vid_num):
            pickle.dump(
                segmentors[i],
                open(dataset_path.format(name='segment_{0}.p'.format(i + 1)),
                     'w'))
            print '{0}-th done. time elapsed: {1}'.format(
                i + 1,
                time.time() - t)
            t = time.time()

        #TODO create database
    else:
        for i in range(vid_num):
            segmentors.append(
                pickle.load(
                    open(dataset_path.format(name='segment_{0}.p'.format(i + 1)), 'r')))


    database = DB(dataset_path.format(name='videos{v}_feature{f}_lvl{l}.h5'.format(
          v='_'.join(map(str, range(1, vid_num + 1))),  # include all vid_num videos in the name
          f=feature_name,
          l=level)))

    print 'Collecting features ...'
    neighbor_num = 6
    keys = (['target', 'negative'] +
            ['neighbor{0}'.format(i) for i in range(neighbor_num)])
    features = segmentors[0].getFeatures(neighbor_num)
    print 'shape features', features['target'].shape
    feats = [features]
    print 'video 1 done!'
    for i in range(1, len(segmentors)):
        tmp = segmentors[i].getFeatures(neighbor_num)
        #feats.append(tmp)
        for key in keys:
            features[key] = np.append(features[key], tmp[key], axis=0)
        print 'video {0} done!'.format(i + 1)
    #print data
    #database_path = '
    print 'saving to database ...'
    for name, data in features.iteritems():
        database.save(data, name)
    #database.save(dataset)
    database.close()

    print 'done!'

def select(x):
    # Turn one (option_name, value) pair from the parsed CLI options into argv tokens.
    if x[1]:
        print 'x[1]:', ['-' + x[0]]
        return ['-' + x[0]]
    else:
        print 'x[0]:', ['']
        return ['']
# in_args (the base command) and options come from earlier, elided lines.
for x in options.__dict__.items():
    if x[1] is not None:
        in_args.extend(select(x))

result = subprocess.Popen(in_args, stdout=subprocess.PIPE)
result = result.stdout.read().split('\n')
pprint.pprint(result)

exp_id = [x for x in result if x.startswith('Experiment number: ')][0]
args = exp_id.split(': ')[-1]
os.system('python new_solver.py ' + args)
conf = getConfigs(int(args))
snapshot_path = conf.solver['snapshot_prefix']
#exp_root = os.path.basename(os.path.dirname(snapshot_path[:-1]))  # superseded by the config value below
exp_root = conf.experiment_folder_name
os.system('python compute_similarities.py -e ' + args)
#os.system('python compute_similarities_vox2pix.py ' + args)
#os.system('mv -f ' + conf.experiments_path + '/similarities.mat '+ conf.experiments_path + '/similarities_1.mat')
#result = subprocess.Popen(in_args.append['-F', 'FCN', '-E', exp_root], stdout=subprocess.PIPE)
#result = result.stdout.read().split('\n')
#os.system('python new_solver.py ' + args)
#os.system('python compute_similarities.py -e ' + args)
#x = np.add(loadmat(conf.experiments_path + 'similarities_1.mat')['similarities'], loadmat(conf.experiments_path + 'similarities.mat')['similarities'])
#savemat(conf.experiments_path + 'similarities.mat', {'similarities': x})
from scipy.io import loadmat
try:
    loadmat('/local-scratch/experiments/' + exp_root + '/similarities.mat')
except IOError:
    pass  # original handler truncated; assume a missing file is tolerated
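
loadmat returns a dict keyed by the MATLAB variable names; the 'similarities' key below follows the commented-out lines above and is an assumption:

# Sketch: inspect the similarity matrix produced by compute_similarities.py.
sim = loadmat('/local-scratch/experiments/' + exp_root + '/similarities.mat')
print 'similarities shape:', sim['similarities'].shape  # key name assumed
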
Example #7
import caffe
from numpy import zeros
import numpy as np

from configs import getConfigs

conf = getConfigs(8)

model_prototxt_path = conf.model['model_prototxt_path']
solver_prototxt_path = conf.solver['_solver_prototxt_path']

#root = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/model/'

caffe.set_mode_gpu()
#caffe.set_device(2)

test_interval = conf.solver['test_interval']  #10000
niter = conf.solver['max_iter']  #500000
train_interval = conf.solver['_train_interval']  #1000
termination_threshold = conf.solver['_termination_threshold']
net = caffe.Net(model_prototxt_path, caffe.TRAIN)

#solver = caffe.SGDSolver(root+'solver.prototxt')
solver = caffe.SGDSolver(solver_prototxt_path)
# losses will also be stored in the log
#train_loss = zeros(niter)
train_loss = np.array([])
test_acc = zeros(int(np.ceil(niter / test_interval)))
#output = zeros((niter, 8, 10))
test_loss = 0
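
test_acc has one slot per test pass. A minimal sketch of the periodic evaluation that would fill it; the 'accuracy' blob name is an assumption (use whatever the test prototxt defines):

# Sketch only: evaluate the test net every test_interval iterations.
for it in range(niter):
    solver.step(1)
    if it % test_interval == 0:
        solver.test_nets[0].forward()  # one forward pass over a test batch
        acc = float(solver.test_nets[0].blobs['accuracy'].data)  # blob name assumed
        test_acc[it // test_interval] = acc
        print 'iter {0}: test accuracy = {1}'.format(it, acc)
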
Example #8

def getBaselineRepresentations(segment, k, feature_type):
    segment.__class__ = MySegmentation
    features = MySegmentation.getFeatures(segment,
                                          k,
                                          feature_type=feature_type)
    return features


if __name__ == '__main__':
    print "START"
    caffe.set_mode_gpu()
    #caffe.set_device(2)
    if len(sys.argv) == 1:
        conf = getConfigs(-1)
    else:
        conf = getConfigs(int(sys.argv[1]))

    logger = Logger(LogType.FILE, conf.solver['_solver_log_path'])
    threshold = 0.6

    db = conf.db_settings
    model_prototxt_path = conf.model['model_prototxt_path']
    solver_prototxt_path = conf.solver['_solver_prototxt_path']
    test_interval = conf.solver['test_interval']  #10000
    niter = conf.solver['max_iter']  #max(100000, conf.solver['max_iter']) #500000
    train_interval = conf.solver['_train_interval']  #1000
    termination_threshold = conf.solver['_termination_threshold']
    print "net"
from Embedding import *
from Annotation import JHMDBAnnotator as JA, Evaluator as EVAL
import numpy as np
import random
import time
from configs import getConfigs

conf = getConfigs(43)
snapshot_path = conf.solver['snapshot_prefix']
db = conf.db_settings[conf.db]
action = 'pour'
video = db['video_name'][action][0]
level = db['level']
dataset_path = db['pickle_path'].format(action_name='pour', video_name=video, level=level)
#dataset_path = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/dataset/{name}'
net_path = conf.model['model_prototxt_path']
#net_path = '{name}'
annotation_path = db['annotation_path'].format(action_name=action, video_name=video)
#annotation_path = '/cs/vml3/mkhodaba/cvpr16/dataset/{name}'
#snapshot_name = '_iter_870000.caffemodel'
snapshot_name = '_iter_25000.caffemodel'

#model_name = 'model.prototxt'
test_model_path = '/cs/vml2/mkhodaba/cvpr16/expriments/8/test.prototxt'
snapshot_path = conf.solver['snapshot_prefix']  #'/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/snapshot/vml_gpu/256bins/'

print net_path

print dataset_path
print
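
The paths printed above are everything needed to load the trained weights into the test-phase net. A minimal sketch, assuming snapshot_prefix concatenates directly with the snapshot name (as the variables above suggest):

import caffe

# Sketch: load the trained snapshot into the test net for evaluation.
net = caffe.Net(test_model_path, snapshot_path + snapshot_name, caffe.TEST)
print 'loaded blobs:', net.blobs.keys()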