Example #1
import numpy as np
import caffe

# `utils` and `test_model` are helper modules from the visual_concepts code base.
import utils
import test_model

def det_label_init():
    # Load the vocabulary
    vocab_file = 'vocabs/vocab_train.pkl'
    vocab = utils.load_variables(vocab_file)

    # Set up Caffe
    caffe.set_mode_gpu()
    caffe.set_device(0)

    # Load the model
    mean = np.array([[[103.939, 116.779, 123.68]]])
    base_image_size = 565
    prototxt_deploy = 'visual_concepts/code/output/vgg/mil_finetune.prototxt.deploy'
    model_file = 'visual_concepts/code/output/vgg/snapshot_iter_240000.caffemodel'
    model = test_model.load_model(prototxt_deploy, model_file, base_image_size, mean, vocab)
    # define functional words
    functional_words = ['a', 'on', 'of', 'the', 'in', 'with', 'and', 'is', 'to', 'an', 'two', 'at', 'next', 'are']
    is_functional = np.array([x not in functional_words for x in vocab['words']])

    # load the score precision mapping file
    eval_file = 'visual_concepts/code/output/vgg/snapshot_iter_240000.caffemodel_output/coco_valid1_eval.pkl'
    pt = utils.load_variables(eval_file)

    # Set threshold_metric_name and output_metric_name
    threshold_metric_name = 'prec'
    output_metric_name = 'prec'
    return model, functional_words, threshold_metric_name, output_metric_name, vocab, is_functional, pt
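
For context, a minimal sketch of how the returned values might be consumed. `detect_words` and the 0.5 cutoff are assumptions for illustration; neither appears in the snippet above.

# Hypothetical driver: detect_words is assumed to return one score per
# vocabulary word for a given image (it is not defined in this snippet).
model, functional_words, threshold_metric_name, output_metric_name, vocab, is_functional, pt = det_label_init()
scores = detect_words(model, 'example.jpg')
keep = (scores > 0.5) & is_functional  # drop functional words, apply a score cutoff
detected = [word for word, k in zip(vocab['words'], keep) if k]
print(detected)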
Example #2
import os

import numpy as np
from PIL import Image

# `ut`, `tm`, `to_pil`, and `white_balance` are project-local helpers.

def main():
    (x_train, y_train) = ut.load_dataset()
    print("Dataset loaded...")
    model_def = tm.load_model('trained_model')
    y_predicted = model_def.predict(x_train)
    np.savetxt("gt", y_train)
    np.savetxt("pred", y_predicted)
    path_input_image = '../../Dataset/GehlerShi_input/'
    path_output = '../../Dataset/Prediction/'
    file_names = []
    for i in range(1, 569):
        file_names.append('00' + ut.zero_string(4-ut.nr_digits(i)) + str(i))
    for index, file_name in enumerate(file_names):
        image_blob = Image.open(os.path.join(path_input_image, file_name + ".png"))
        gt_luminance = y_train[index]
        pred_luminance = y_predicted[index]
        white_bal_groundtruth = to_pil(white_balance(image_blob, gt_luminance))
        white_bal_prediction = to_pil(white_balance(image_blob, pred_luminance))
        white_bal_groundtruth.save(os.path.join(path_output, file_name + "_gt.png"))
        white_bal_prediction.save(os.path.join(path_output, file_name + "_pred.png"))
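
Incidentally, the padded file names built above can be produced more directly with str.zfill; an equivalent one-liner:

# Equivalent construction: '000001' ... '000568' ('00' plus the index
# zero-padded to four digits, as in the loop above).
file_names = ['00' + str(i).zfill(4) for i in range(1, 569)]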
Example #3
from test_model import (load_model, get_prediction)

from http.server import HTTPServer, BaseHTTPRequestHandler
import ssl
import json
import traceback
import os

# server config
PORT = 3000
SSL_KEY = "ssl/private.key"
SSL_CERT = "ssl/ca_bundle.crt"

config, tokenizer, model = load_model(
    "chess/bert-base-multilingual-uncased_English_translated_baseline_32/")


class ModelRequestHandler(BaseHTTPRequestHandler):
    def __init__(self, *args):
        super().__init__(*args)

    def do_GET(self):
        try:
            if self.path == "/":
                self.path = "/index.html"

            if self.path == "they_actually_said_that.txt":
                with open(self.path, "r") as document:
                    self.send_response(200)
                    self.send_header("Content-type", "text/html")
                    self.end_headers()
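
The snippet ends before the server is started; a minimal sketch of how the imported pieces and the config above are typically wired together (the bind address is an assumption):

# Illustrative only, not part of the original snippet: serve
# ModelRequestHandler over TLS using PORT, SSL_KEY, and SSL_CERT from above.
httpd = HTTPServer(("0.0.0.0", PORT), ModelRequestHandler)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile=SSL_CERT, keyfile=SSL_KEY)
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()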
Example #4
import argparse
import os
import time

import numpy as np
import torch
from torch.autograd import Variable

# load_categories, load_model, load_transform, extract_frames, and
# FeatureExtractor are helpers from the surrounding TRN test code.

def main():

    start_time = time.time()
    # Get dataset categories
    categories = load_categories()

    # Load RGB model
    model_id = 1
    model = load_model(model_id, categories).cuda()

    # Load the video frame transform
    transform = load_transform()

    mode_folder = args.video_file.split("/")[-1]
    if not mode_folder:
        raise Exception("DONT GIMME THAT LAST SLASH IN THE VIDEO_FILE plz.")
    feature_dir = "/media/lili/fce9875a-a5c8-4c35-8f60-db60be29ea5d/extracted_features_moments_raw/%s" % mode_folder

    if not os.path.exists(feature_dir):
        os.makedirs(feature_dir)
    for subdir in sorted(os.listdir(args.video_file)):
        print("subdir :", subdir)
        video_dir = os.path.join(args.video_file, subdir)

        logits_list = []
        features_list = []
        video_name_list = []

        for video_name in sorted(os.listdir(video_dir)):

            single_video_path = os.path.join(video_dir, video_name)

            frames = extract_frames(single_video_path, args.num_segments)

            # Prepare input tensor
            data = torch.stack([transform(frame) for frame in frames])

            # volatile=True marks inference-only tensors (pre-0.4 PyTorch API).
            input_var = Variable(data.view(-1, 3, data.size(2), data.size(3)),
                                 volatile=True).cuda()

            # Extract features before the fully connected layers
            res50_before_fc = FeatureExtractor(model)

            # Make video prediction
            logits = model(input_var)
            features = res50_before_fc(input_var)

            logits_np = logits.data.cpu().numpy()
            logits_list.append(logits_np)

            # save features before the fully connected layer

            features_list.append(np.squeeze(features.data.cpu().numpy()))

            stored_video_name = os.path.join(subdir, video_name)
            video_name_list.append(stored_video_name)

        np.save(os.path.join(feature_dir, "{}_logits.npy".format(subdir)),
                np.asarray(logits_list))
        np.save(os.path.join(feature_dir, "{}_names.npy".format(subdir)),
                np.asarray(video_name_list))
        np.save(os.path.join(feature_dir, "{}_features.npy".format(subdir)),
                np.asarray(features_list))
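
FeatureExtractor is not defined in the snippet above; one common way to capture pre-FC activations in this era of PyTorch is a forward hook. A hypothetical stand-in, assuming a torchvision ResNet-style model with a .fc attribute:

# Hypothetical implementation: records the input to model.fc on each forward
# pass and returns it when called on a batch.
class FeatureExtractor:
    def __init__(self, model):
        self.model = model
        self.features = None
        model.fc.register_forward_hook(self._hook)

    def _hook(self, module, inputs, output):
        self.features = inputs[0]

    def __call__(self, x):
        self.model(x)
        return self.features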
Example #5
import argparse
import os

# load_categories, load_model, and load_transform are TRN helpers.

parser = argparse.ArgumentParser(description="test TRN on a single video")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--video_file', type=str, default=None)
group.add_argument('--frame_folder', type=str, default=None)
group.add_argument('--videos_dir', type=str, default=None)
parser.add_argument('--rendered_output', type=str, default=None)
parser.add_argument('--num_segments', type=int, default=8)
args = parser.parse_args()

# Get dataset categories
categories = load_categories()

# Load RGB model
model_id = 1
model = load_model(model_id, categories)

# Load the video frame transform
transform = load_transform()

all_scenes = list(
    map(
        os.path.abspath,
        map(lambda d: os.path.join(args.videos_dir, d),
            os.listdir(args.videos_dir))))
all_scenes = list(filter(lambda f: f.endswith('mp4'), all_scenes))
out_dir = os.path.abspath(os.path.join(args.videos_dir, '../scores_final'))
os.makedirs(out_dir, exist_ok=True)

# Obtain video frames
for scene in all_scenes:
Example #6
def create_64_bracket(regions):
    bracket_64 = []
    for index, region in enumerate(regions):
        for seed in region:
            # Each seed is a function of its region, i.e. region 1 seed 1 is
            # new_seed 1 and region 3 seed 1 is new_seed 33. The formula is
            # seed + 16 * (region # - 1); index already equals region # - 1,
            # since the regions list starts at index 0.
            bracket_64.append(seed + (16 * index))

    return bracket_64
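
As a quick sanity check on the seed arithmetic (illustrative only; regions here stands in for the output of generate_tournament):

# Four identical 1-16 regions flatten to seeds 1..64 in region order.
regions = [list(range(1, 17))] * 4
flat = create_64_bracket(regions)
assert flat[32] == 33  # region 3 (index 2), seed 1 -> 1 + 16 * 2 = 33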


if __name__ == '__main__':

    logging.basicConfig(level=logging.DEBUG)
    year = 2018

    model = training.Lin_Relu(20)
    model = test_model.load_model(".\\Models\\500_epochs", model)
    # model = test_model.load_model(".\\Models\\5_epochs", model)

    kp_df = kenpom_df(".\\Training_Data\\KenPom_Complete.csv", year)

    # A 64-team bracket can't be built directly; it is assembled from four
    # 16-team regional brackets that play into each other.
    region1 = generate_tournament(16)

    regions = [region1] * 4

    bracket_64 = create_64_bracket(regions)

    # tourney_dict functions as match data, while maintaining easily accessible team information based on region and seed
    tourney_dict = match_teams_seeds(".\\Training_Data\\Match_Data.csv", year)

    total_score = 0
Example #7
def loadModel(deployProtoPath, modelPath, vocab, base_image_size, infType):
    mean = np.array([[[103.939, 116.779, 123.68]]])
    model = tm.load_model(deployProtoPath, modelPath, base_image_size, mean,
                          vocab)
    model['inf_type'] = infType
    return model
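
A hypothetical call, reusing the VGG deploy files and the 565-pixel base image size seen in the other examples on this page (the 'MIL' inference type is an assumption):

# Illustrative only: the paths and inf_type are placeholders.
model = loadModel('output/vgg/mil_finetune.prototxt.deploy',
                  'output/vgg/snapshot_iter_240000.caffemodel',
                  vocab, 565, 'MIL')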
Example #9

import numpy as np
import caffe

# `utils` and `test_model` are helper modules from the visual_concepts code base.
import utils
import test_model

# Load the vocabulary
vocab_file = 'vocabs/vocab_train.pkl'
vocab = utils.load_variables(vocab_file)

# Set up Caffe
caffe.set_mode_gpu()
caffe.set_device(0)

# Load the model
mean = np.array([[[103.939, 116.779, 123.68]]])
base_image_size = 565
prototxt_deploy = 'output/vgg/mil_finetune.prototxt.deploy'
model_file = 'output/vgg/snapshot_iter_240000.caffemodel'
model = test_model.load_model(prototxt_deploy, model_file, base_image_size, mean, vocab)


# define functional words
functional_words = ['a', 'on', 'of', 'the', 'in', 'with', 'and', 'is', 'to', 'an', 'two', 'at', 'next', 'are']
is_functional = np.array([x not in functional_words for x in vocab['words']])

# load the score precision mapping file
eval_file = 'output/vgg/snapshot_iter_240000.caffemodel_output/coco_valid1_eval.pkl'
pt = utils.load_variables(eval_file)

# Set threshold_metric_name and output_metric_name
threshold_metric_name = 'prec'
output_metric_name = 'prec'
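
Nothing further in the snippet uses these settings; for illustration, a sketch of how a score-to-precision mapping like pt could calibrate raw detection scores. The layout assumed for pt here is hypothetical, not taken from the original code:

# Hypothetical layout: one (score, prec) curve per word, interpolated to map
# a raw detection score onto the precision measured at that score.
def score_to_precision(word_index, raw_score):
    curve = pt[word_index]  # assumed: {'score': ndarray, 'prec': ndarray}
    return np.interp(raw_score, curve['score'], curve['prec'])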