def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Prediction is run separately for every video folder

    src = "/imatge/lpanagiotis/work/Epic-Kitchens/object_detection_images"
    dst = "/imatge/lpanagiotis/work/Epic-Kitchens/saliency_maps"
    for x in ["train", "test"]:
        print("Now predicting frames for {}".format(x))
        root_path = os.path.join(src, x)
        people = os.listdir(root_path)

        for person in people:
            person_path = os.path.join(src, x, person)
            videos = os.listdir(person_path)

            for video in videos: #Every video is a directory
                # Define the source directory for this video
                source_video = os.path.join(person_path, video)

                # Define our destination directory
                frames = os.listdir(source_video)
                destination_dir = os.path.join(dst, x, person, video)
                if not os.path.exists(destination_dir):
                    os.makedirs(destination_dir)
                print("destination is {}".format(destination_dir))
                test(path_to_images=source_video, path_output_maps=destination_dir, model_to_test=model)
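All of the snippets on this page restore a generator snapshot with load_weights() before calling test() or predict(). The helper itself is not shown here; as a rough sketch, assuming it mirrors the inline loading code in Example #18 (np.load of a gen_modelWeights{epoch:04d}.npz file followed by lasagne.layers.set_all_param_values), it presumably looks something like this:

import numpy as np
import lasagne

def load_weights(net, path, epochtoload, layernum=None):
    # Hypothetical sketch, not the repository's actual helper: restore parameters
    # from a snapshot such as 'gen_modelWeights0090.npz' (cf. Example #18).
    with np.load(path + "modelWeights{:04d}.npz".format(epochtoload)) as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    if layernum is not None:
        # Some examples pass layernum to restore only a prefix of the parameter
        # arrays (e.g. layernum=48 together with the 'uconv2_1' sub-network).
        param_values = param_values[:layernum]
    lasagne.layers.set_all_param_values(net, param_values)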
Example #2
def main(in_folder, out_folder):
    # Create network
    model = ModelBCE(256, 192, batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='pretrained_weights/ft45_18_model6_gen_', epochtoload=150, layernum=54)
    # Specify the path to the input images and the output path
    test(path_to_images=in_folder, path_output_maps=out_folder, model_to_test=model)
Example #3
def main():
    # Create network
    #model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=1)
    model = ModelBCE(448, 448, batch_size=1)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=145)
    # Specify the video list file, the video directory and the output path
    videofile = open(VideoNameFile, 'r')
    allline = videofile.readlines()
    for line in allline:
        lindex = line.index('\t')
        VideoIndex = int(line[:lindex])
        VideoName = line[lindex + 1:-2]
        print VideoName
        VideoCap = cv2.VideoCapture(Video_dir + '/' + VideoName + '.mp4')
        fps = float(VideoCap.get(cv2.CAP_PROP_FPS))
        VideoFrame = int(VideoCap.get(cv2.CAP_PROP_FRAME_COUNT))
        w = int(VideoCap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(VideoCap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        videoWriter = cv2.VideoWriter('./out/' + VideoName + '_Salganmore.avi',
                                      cv2.VideoWriter_fourcc(
                                          'D', 'I', 'V', 'X'),
                                      fps, (w, h),
                                      isColor=False)
        while VideoCap.get(cv2.CAP_PROP_POS_FRAMES) < VideoFrame:
            ret, frame = VideoCap.read()
            if not ret:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame2 = frame.astype(np.uint8)
            saliency_map = predict(model, frame2)
            Out_frame = np.uint8(saliency_map)
            videoWriter.write(Out_frame)
        videoWriter.release()
        VideoCap.release()
Example #4
def main(in_folder, out_folder):
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Specify the path to the input images and the output path
    test(path_to_images=in_folder, path_output_maps=out_folder, model_to_test=model)
Example #5
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=1)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Specify the path to the input images and the output path
    test(path_to_images='../images/', path_output_maps='../saliency2/', model_to_test=model)
Example #6
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=1)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Specify the video directory and the output path
    namelist = os.listdir(Video_dir)
    for videonameind in namelist:
        VideoName = videonameind[:-4]
        VideoCap = cv2.VideoCapture(Video_dir + '/' + videonameind)
        #print(os.path.join(Video_dir,VideoName, VideoName))
        VideoSize = (int(VideoCap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(VideoCap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        print VideoSize
        VideoFrame = int(VideoCap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = float(VideoCap.get(cv2.CAP_PROP_FPS))
        print('New video: %s with %d frames and size of (%d, %d)' %
              (VideoName, VideoFrame, VideoSize[1], VideoSize[0]))
        out = cv2.VideoWriter('./out/' + VideoName + '.avi',
                              cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'),
                              fps,
                              VideoSize,
                              isColor=False)
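        # Assumption: predict() resizes the saliency map back to the source frame
        # size (mirroring compute_saliency in Example #14), so frames match VideoSize.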
        while VideoCap.get(cv2.CAP_PROP_POS_FRAMES) < VideoFrame:
            ret, frame = VideoCap.read()
            if not ret:
                break
            frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame2 = frame2.astype(np.uint8)
            saliency_map = predict(model, frame2)
            Out_frame = np.uint8(saliency_map)
            out.write(Out_frame)
        out.release()
        VideoCap.release()
Example #7
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=1)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='minglanggen_', epochtoload=145)
    # Specify the path to the input images and the output path
    test(path_to_images='/home/s/re/minglang_for_salgan/test_images/',
         path_output_maps='/home/s/re/minglang_for_salgan/testout/',
         model_to_test=model)
Example #8
def main():
    # Create network
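    # The positional arguments after the input size are presumably batch size,
    # learning rate, regularisation term and momentum (cf. the ModelBCE call in Example #18).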
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], 10, 0.05, 1e-5, 0.99)
    #model = ModelSALGAN(INPUT_SIZE[0], INPUT_SIZE[1],9,0.01,1e-05,0.01,0.2)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path="bce_weights/gen_", epochtoload=10)
    # Specify the path to the input images and the output path
    test(path_to_images=pathToImages,
         path_output_maps=pathToResMaps,
         model_to_test=model)
Example #9
def main(input_dir):
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)

    print ""
    print "Load weights..."
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)

    print "Start predicting..."
    # Specify the path to the input images
    test(path_to_images=input_dir, model_to_test=model)
Example #10
def main():
    # Create network
    model_name = 'models'
    epochtoload = 90
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    print model_name + ' ' + str(epochtoload)
    load_weights(model.net['output'],
                 path=model_name + '/gen_',
                 epochtoload=epochtoload)
    test(path_to_images=PATH_TO_IMAGES,
         path_output_maps=PATH_TO_SALMAPS + '/',
         model_to_test=model)
Example #11
def train():
    """
    Train both generator and discriminator
    :return:
    """
    # Load data
    print 'Loading training data...'
    #with open('../saliency-2016-lsun/validationSample240x320.pkl', 'rb') as f:
    with open(TRAIN_DATA_DIR, 'rb') as f:
        train_data = pickle.load(f)
    print '-->done!'

    print 'Loading validation data...'
    # with open('../saliency-2016-lsun/validationSample240x320.pkl', 'rb') as f:
    with open(VAL_DATA_DIR, 'rb') as f:
        validation_data = pickle.load(f)
    print '-->done!'

    # Choose a random sample to monitor the training
    num_random = random.choice(range(len(validation_data)))
    validation_sample = validation_data[num_random]
    cv2.imwrite(DIR_TO_SAVE + '/validationRandomSaliencyGT.png',
                validation_sample.saliency.data)
    cv2.imwrite(DIR_TO_SAVE + '/validationRandomImage.png',
                cv2.cvtColor(validation_sample.image.data, cv2.COLOR_RGB2BGR))

    # Create network

    if flag == 'salgan':
        model = ModelSALGAN(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
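        # Assumption: layernum restores only the first N parameter arrays of the
        # snapshot; the trailing comments note the full counts for each network.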
        load_weights(net=model.net['uconv2_1'],
                     path="gen_",
                     epochtoload=90,
                     layernum=48)  # full layernum=54
        load_weights(net=model.discriminator['prob'],
                     path='discrim_',
                     epochtoload=90,
                     layernum=20)  # full layernum=20
        salgan_batch_iterator(model, train_data, validation_sample.image.data)

    elif flag == 'bce':
        model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        load_weights(net=model.net['output'], path='gen_',
                     epochtoload=90)  # fy:load pretrained BCE model
        bce_batch_iterator(model, train_data, validation_sample.image.data)
    else:
        print "Invalid input argument."
def train():
    """
    Train both generator and discriminator
    :return:
    """
    # Load data
    flag = 'bce'
    print 'Loading training data...'
    with open(
            '/home/s/re/saliency-salgan-2017-master2/minglangtrainData.pickle',
            'rb') as f:
        # with open(TRAIN_DATA_DIR, 'rb') as f:
        train_data = pickle.load(f)
    print '-->done!'

    # print 'Loading validation data...'
    # with open('../saliency-2016-lsun/validationSample240x320.pkl', 'rb') as f:
    # with open(VALIDATION_DATA_DIR, 'rb') as f:
    # validation_data = pickle.load(f)
    # print '-->done!'

    # Choose a random training sample to monitor training (validation loading is commented out above)
    num_random = random.choice(range(len(train_data)))
    validation_sample = train_data[num_random]
    cv2.imwrite(DIR_TO_SAVE + '/validationRandomSaliencyGT.png',
                validation_sample.saliency.data)
    cv2.imwrite(DIR_TO_SAVE + '/validationRandomImage.png',
                cv2.cvtColor(validation_sample.image.data, cv2.COLOR_RGB2BGR))

    # Create network

    if flag == 'salgan':
        model = ModelSALGAN(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        #load_weights(model.net['output'], path='gen_', epochtoload=90)
        load_weights(net=model.net['output'], path='gen_', epochtoload=90)
        load_weights(net=model.discriminator['prob'],
                     path='discrim_',
                     epochtoload=90)
        salgan_batch_iterator(model, train_data)

    elif flag == 'bce':
        model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        # load_weights(net=model.net['output'], path='test/gen_', epochtoload=15)
        load_weights(net=model.net['output'], path='gen_', epochtoload=90)
        bce_batch_iterator(model, train_data)
    else:
        print "Invalid input argument."
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)
    # Prediction is run separately for every video folder
    for i in range(1, 1001):
        print("Now predicting for video number {}".format(i))
        new_directory = os.path.join(
            "/imatge/lpanagiotis/work/DHF1K_extracted/temp", str(i))
        if not os.path.exists(new_directory):
            os.mkdir(new_directory)

        # Specify the path to the input images and the output path
        test(
            path_to_images='/imatge/lpanagiotis/work/DHF1K_extracted/frames/{}/'
            .format(i),
            path_output_maps=new_directory,
            model_to_test=model)
Example #14
def main():
    options = json.loads(os.environ['SMILER_PARAMETER_MAP'])
    use_default_blur = options.get('do_smoothing', 'default') == 'default'
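    # SMILER passes model options as a JSON map via SMILER_PARAMETER_MAP; leaving
    # do_smoothing at 'default' enables the Gaussian blur applied below.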

    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)

    def compute_saliency(image_path):
        img = cv2.cvtColor(cv2.imread(image_path, cv2.IMREAD_COLOR),
                           cv2.COLOR_BGR2RGB)

        size = (img.shape[1], img.shape[0])
        blur_size = 5

        if img.shape[:2] != (model.inputHeight, model.inputWidth):
            img = cv2.resize(img, (model.inputWidth, model.inputHeight),
                             interpolation=cv2.INTER_AREA)

        blob = np.zeros((1, 3, model.inputHeight, model.inputWidth),
                        theano.config.floatX)

        blob[0, ...] = (img.astype(theano.config.floatX).transpose(2, 0, 1))

        result = np.squeeze(model.predictFunction(blob))
        saliency_map = (result * 255).astype(np.uint8)

        # resize back to original size
        saliency_map = cv2.resize(saliency_map,
                                  size,
                                  interpolation=cv2.INTER_CUBIC)
        # blur
        if use_default_blur:
            saliency_map = cv2.GaussianBlur(saliency_map,
                                            (blur_size, blur_size), 0)
        # clip again
        saliency_map = np.clip(saliency_map, 0, 255)

        return saliency_map

    run_model(compute_saliency)
Example #15
def train():
    """
    Train both generator and discriminator
    :return:
    """
    # Load data
    print 'Loading training data...'
    with open(pathToPickle + 'trainData.pickle', 'rb') as f:
        # with open(TRAIN_DATA_DIR, 'rb') as f:
        train_data = pickle.load(f)
    print '-->done!'

    print 'Loading validation data...'
    with open(pathToPickle + 'validationData.pickle', 'rb') as f:
        # with open(VALIDATION_DATA_DIR, 'rb') as f:
        validation_data = pickle.load(f)
    print '-->done!'

    # Choose a random sample to monitor the training
    num_random = random.choice(range(len(validation_data)))
    validation_sample = validation_data[num_random]
    cv2.imwrite('./' + DIR_TO_SAVE + '/validationRandomSaliencyGT.png',
                validation_sample.saliency.data)
    cv2.imwrite('./' + DIR_TO_SAVE + '/validationRandomImage.png',
                cv2.cvtColor(validation_sample.image.data, cv2.COLOR_RGB2BGR))

    # Create network

    if flag == 'salgan':
        model = ModelSALGAN(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        # load_weights(net=model.net['output'], path="nss/gen_", epochtoload=15)
        # load_weights(net=model.discriminator['prob'], path="test_dialted/disrim_", epochtoload=54)
        salgan_batch_iterator(model, train_data, validation_sample.image.data)

    elif flag == 'bce':
        model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1])
        # Load a pre-trained model
        # load_weights(net=model.net['output'], path='test/gen_', epochtoload=15)
        bce_batch_iterator(model, train_data, validation_sample.image.data)
    else:
        print "Invalid input argument."
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)

    src = "/imatge/lpanagiotis/work/GTEA_Gaze/frames"
    dst = "/imatge/lpanagiotis/work/GTEA_Gaze/predictions"
    if not os.path.exists(dst):
        os.mkdir(dst)

    folders = os.listdir(src)
    for recipe_folder in folders:
        complete_src = os.path.join(src, recipe_folder)
        complete_dst = os.path.join(dst, recipe_folder)
        if not os.path.exists(complete_dst):
            os.mkdir(complete_dst)
        # Specify the path to the input images and the output path
        test(path_to_images=complete_src,
             path_output_maps=complete_dst,
             model_to_test=model)
Example #17
def main():
    # Create network
    model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
    # Specify the epoch of the model snapshot to load
    load_weights(model.net['output'], path='gen_', epochtoload=90)

    src = "/imatge/lpanagiotis/projects/saliency/public_html/2016-egomon/video_clean"
    dst = "/imatge/lpanagiotis/work/Egomon/temp"

    folders = os.listdir(src)
    for folder in folders:
        frames_folder = "frames_" + folder
        src_path = os.path.join(src, folder, frames_folder)

        print("Now predicting for {}".format(folder))
        new_directory = os.path.join(dst, folder)
        if not os.path.exists(new_directory):
            os.makedirs(new_directory)

        # Specify the path to the input images and the output path
        test(path_to_images=src_path,
             path_output_maps=new_directory,
             model_to_test=model)
Example #18
def main(args):
    if args.mode == 'train':
        print 'Loading training data...'
        with open(args.trainset, 'rb') as f:
            train_data = pickle.load(f)
        print '-->done!'

        print 'Loading test data...'
        with open(args.valset, 'rb') as f:
            validation_data = pickle.load(f)
        print '-->done!'

        # Create network
        if args.model == 'salgan':

            model_args = [
                args.width, args.height, args.batch_size, args.lr,
                args.regul_term, args.alpha
            ]
            model = ModelSALGAN(*model_args)

            if args.resume:
                load_weights(net=model.net['output'],
                             path="weights/gen_",
                             epochtoload=args.resume)
                load_weights(net=model.discriminator['fc5'],
                             path="weights/disrim_",
                             epochtoload=args.resume)
            salgan_batch_iterator(model,
                                  train_data,
                                  validation_data,
                                  epochs=args.num_epochs)

        elif args.model == 'bce':
            model_args = [
                args.width, args.height, args.batch_size, args.lr,
                args.regul_term, args.momentum
            ]
            model = ModelBCE(*model_args)

            if args.resume:
                load_weights(net=model.net['output'],
                             path='weights/gen_',
                             epochtoload=args.resume)
            bce_batch_iterator(model,
                               train_data,
                               validation_data,
                               epochs=args.num_epochs)

        else:
            print "Invalid Model Argument."

    elif args.mode == 'test':
        model = ModelBCE()
        with np.load('../weights/gen_' +
                     "modelWeights{:04d}.npz".format(args.test_epoch)) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(model.net['output'], param_values)

        list_img_files = [
            k.split('/')[-1].split('.')[0]
            for k in glob.glob(os.path.join(args.imgdir, 'val_*.bmp'))
        ]
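        # list_img_files now holds the base names (no directory, no extension) of the val_*.bmp images.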
        for curr_file in tqdm(list_img_files):
            img = cv2.cvtColor(
                cv2.imread(os.path.join(args.imgdir, curr_file + '.bmp'),
                           cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)

            img = cv2.resize(img, (args.width, args.height),
                             interpolation=cv2.INTER_AREA)

            blob = np.zeros((1, 3, args.height, args.width),
                            theano.config.floatX)
            blob[0, ...] = img.astype(theano.config.floatX).transpose(2, 0, 1)

            result = np.squeeze(model.predictFunction(blob))
            seg_map = (result * 255).astype(np.uint8)

            seg_map = cv2.resize(seg_map, (args.width, args.height),
                                 interpolation=cv2.INTER_CUBIC)
            seg_map = np.clip(seg_map, 0, 255)

            cv2.imwrite(
                os.path.join(args.resdir,
                             curr_file + '_' + args.arch + '.bmp'), seg_map)

    elif args.mode == 'eval':
        evaluator = Evaluation()

        evaluator(args.gtdir, args.resdir, args.suffix)
        evaluator.print_vals()
Example #19
import os
import sys
from io import BytesIO

import cv2
import numpy as np
from PIL import Image

# THEANO_FLAGS must be set before theano is imported
device = 'cpu' if int(os.environ["GPU"]) < 0 else 'cuda{}'.format(
    os.environ["GPU"])
os.environ["THEANO_FLAGS"] = (
    "mode=FAST_RUN,device={},floatX=float32,lib.cnmem=1.0,"
    "optimizer_including=cudnn,exception_verbosity=high"
).format(device)
sys.path.append(os.environ["SALGAN_PATH"] + "scripts/")

from utils import *
from constants import *
from models.model_bce import ModelBCE

model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)

load_weights(model.net['output'], path='gen_', epochtoload=90)
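# The network and its weights are set up once at import time; predict() below then
# only needs a forward pass per image.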


def predict(input_image):
    input_image = Image.open(BytesIO(input_image))

    img = np.array(input_image)

    size = (img.shape[1], img.shape[0])

    blur_size = 25

    if img.shape[:2] != (model.inputHeight, model.inputWidth):
        img = cv2.resize(img, (model.inputWidth, model.inputHeight),