コード例 #1
0
ファイル: Dataprovider.py プロジェクト: SoulDuck/Find_Wally
    def __init__(self, wholeImg_dir, faceImg_dir, anns):
        """Load whole-scene / face image pairs plus bounding-box annotations.

        Args:
            wholeImg_dir: directory with the full scene ``*.jpg`` images.
            faceImg_dir: directory with the cropped face ``*.jpg`` images.
            anns: CSV annotation file; the header row is skipped and each
                data row is ``fname,x1,x2,y1,y2,w,h``.
        """
        self.img_prc = ImageProcessing()
        self.wholeImg_dir = wholeImg_dir
        self.faceImg_dir = faceImg_dir
        self.anns = anns

        self.face_paths = glob.glob(os.path.join(faceImg_dir, '*.jpg'))
        self.whole_paths = glob.glob(os.path.join(wholeImg_dir, '*.jpg'))
        self.names = get_names(self.face_paths)

        # Read the annotation CSV; `with` guarantees the handle is closed
        # (the original leaked the open file object).
        with open(self.anns, 'r') as f:
            lines = f.readlines()
        self.infos = {}

        print('# line {}'.format(len(lines)))

        # Skip the header row (the redundant extra [:] copy was dropped).
        for line in lines[1:]:
            fname, x1, x2, y1, y2, w, h = line.split(',')
            x1, x2, y1, y2, w, h = map(lambda v: int(v.strip()),
                                       [x1, x2, y1, y2, w, h])
            # NOTE: stored order is [x1, y1, x2, y2, w, h] — not CSV order.
            coord = [x1, y1, x2, y2, w, h]

            whole_img = self.img_prc.path2img(
                os.path.join(self.wholeImg_dir, fname), resize=None)
            face_img = self.img_prc.path2img(
                os.path.join(self.faceImg_dir, fname), resize=None)
            self.infos[fname] = {
                'coord': coord,
                'whole_img': whole_img,
                'face_img': face_img
            }
コード例 #2
0
ファイル: decisor.py プロジェクト: camilo912/smart_car
    def predict(self, img):
        """Run the TF session on a single image and return the raw output.

        The image is resized to the model's expected (width, height), a batch
        axis is prepended, and the graph tensors are addressed by name.

        Args:
            img: HxWxC image array.

        Returns:
            The first (and only) element of the network's output batch.
        """
        img = cv2.resize(img, (self.shape[1], self.shape[0]))
        outs = self.sess.run('output:0',
                             feed_dict={'input:0': np.expand_dims(img, 0)})
        # Removed a dead `names` dict comprehension that was never used.
        return outs[0]
コード例 #3
0
ファイル: Dataprovider.py プロジェクト: SoulDuck/Find_Wally
    def get_background_imgs(self, n_bg, savedir):
        """Load, or generate and cache, the background ('not Wally') splits.

        If all three ``bg_*.npy`` files exist under `savedir` they are loaded;
        otherwise `n_bg` random background crops are read, split into
        train/val/test, stored on the instance, and saved to disk.

        Args:
            n_bg: number of background crops to sample when generating.
            savedir: directory holding/receiving the cached ``.npy`` files.
        """
        bg_train_path = os.path.join(savedir, 'bg_train.npy')
        bg_test_path = os.path.join(savedir, 'bg_test.npy')
        bg_val_path = os.path.join(savedir, 'bg_val.npy')
        paths = np.asarray(
            glob.glob(os.path.join('background', 'cropped_bg', '*')))
        # Removed an unused `names` computation.

        if all(os.path.exists(p)
               for p in (bg_train_path, bg_test_path, bg_val_path)):
            self.bg_train_imgs = np.load(bg_train_path)
            self.bg_test_imgs = np.load(bg_test_path)
            self.bg_val_imgs = np.load(bg_val_path)
        else:
            print('Generating Not Wally Data....')
            # random.sample yields a permutation without replacement, so the
            # first n_bg entries are n_bg distinct indices.
            indices = random.sample(range(len(paths)), len(paths))[:n_bg]
            paths = paths[indices]
            imgs = self.img_prc.paths2imgs(paths, (64, 64))
            print('background images : {}'.format(np.shape(imgs)))

            # Split into train / val / test.
            self.bg_train_imgs, self.bg_val_imgs, self.bg_test_imgs = self.img_prc.divide_TVT(
                imgs, 0.1, 0.1)
            # Save with the same path variables used for the existence check
            # (they were rebuilt inline before).
            np.save(bg_train_path, self.bg_train_imgs)
            np.save(bg_test_path, self.bg_test_imgs)
            np.save(bg_val_path, self.bg_val_imgs)
コード例 #4
0
def get_saved_api():
    """Return the saved form-option lists as a CORS-enabled JSON response."""
    payload = {
        'car_model_names': utils.get_names(),
        'Location': utils.get_locations(),
        'Owner_Type': utils.get_own_type(),
        'Fuel_Type': utils.get_fuel(),
        'Transmission': utils.get_tranmn(),
    }
    response = jsonify(payload)
    # Allow any origin to consume this endpoint.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
コード例 #5
0
ファイル: builder.py プロジェクト: chenyfsysu/PythonNote
	def visit_Assign(self, node):
		"""Track assignment targets: drop explicit globals, else add locals."""
		scope = self.scope
		should_track = self.enableStaticLocals()
		for target in node.targets:
			names = utils.get_names(target, pure_only=False) or []
			for name in names:
				if scope.identify(name) == NT_GLOBAL_EXPLICIT:
					# Names declared `global` are removed from the root scope.
					self.rootScope.removeLocals(name)
				elif should_track:
					scope.addLocals(name, node)
		self.genricVisit(node)
コード例 #6
0
    def __init__(self):
        """Wire up the action lookup tables and the car's component parts."""
        self.names = utils.get_names()
        # Invert name -> index into index -> action for prediction lookup.
        self.actions = dict((idx, label) for label, idx in self.names.items())
        # Sentinel action: stop the car on a stop sign or red light.
        self.actions[-1] = 'stop'

        # Component parts.
        self.sender = Sender()
        self.decisor = Decisor()
        self.capture = Capture()
コード例 #7
0
 def answer(self, question):
     """Answer a simple question against the Freebase graph.

     Predicts a relation, extracts entity-query tokens, collects candidate
     mids by ngram lookup (backing off from trigrams to unigrams), ranks
     them by TF-IDF against their names, and looks up the (entity, relation)
     pair in the graph.

     Returns:
         str: the best answer's name, or "UNKNOWN" when no candidate or
         graph entry is found.
     """
     pred_relation = www2fb(get_relation(question, self.questions, self.model, self.index2rel, self.args))
     query_tokens = get_query_text(question, self.questions, self.ent_model, self.index2tag, self.args)

     N = min(len(query_tokens), 3)

     C = []  # candidate entity mids
     # Back off from the longest ngrams to unigrams until something matches.
     # BUGFIX: an unconditional `break` after the conditional one made this
     # loop run only once, defeating the backoff; it has been removed.
     for n in range(N, 0, -1):
         ngrams_set = find_ngrams(query_tokens, n)
         for ngram_tuple in ngrams_set:
             ngram = " ".join(ngram_tuple)
             ngram = strip_accents(ngram)
             # unigram stopwords have too many candidates so just skip over
             if ngram in stopwords:
                 continue
             # Some ngrams are simply absent from the entity index
             # (e.g. 'p.a.r.c.e. parce'); narrowed from a bare except.
             try:
                 cand_mids = self.index_ent[ngram]  # search entities
             except KeyError:
                 continue
             C.extend(cand_mids)
         if len(C) > 0:
             break

     C_pruned = []
     for mid in set(C):
         if mid in self.index_reach:  # NOTE(review): unclear why a mid could be missing — confirm
             count_mid = C.count(mid)  # number of times mid appeared in C
             C_pruned.append((mid, count_mid))
             # Original behavior kept: mids whose reach contains the
             # predicted relation are appended a second time, effectively
             # double-weighting them in the ranking.
             if pred_relation in self.index_reach[mid]:
                 C_pruned.append((mid, count_mid))

     num_entities_fbsubset = 1959820  # 2M - 1959820 , 5M - 1972702
     C_tfidf_pruned = []
     for mid, count_mid in C_pruned:
         if mid in self.index_names:
             cand_ent_name = pick_best_name(question, self.index_names[mid])
             tfidf = calc_tf_idf(query_tokens, cand_ent_name, count_mid, num_entities_fbsubset, self.index_ent)
             C_tfidf_pruned.append((mid, cand_ent_name, tfidf))

     # Guard: no scorable candidate (previously raised IndexError).
     if not C_tfidf_pruned:
         return "UNKNOWN"
     C_tfidf_pruned.sort(key=lambda t: -t[2])
     pred_ent, name_ent, score = C_tfidf_pruned[0]

     key = (pred_ent, pred_relation)
     if key not in self.fb_graph:
          return "UNKNOWN"
     result_mid = self.fb_graph[key]
     result_mid = list(result_mid)

     result = get_names(self.fb_graph, result_mid)[0]
     return result
コード例 #8
0
ファイル: app.py プロジェクト: StevenZabolotny/hw1
def search(question):
    """Answer a "who"/"when" question and render the results page.

    Googles the question for candidate URLs, extracts names or dates from
    them, and picks the candidate with the highest count as the answer.

    NOTE: as before, a question that is neither "who" nor "when" leaves
    `names_dict` (and `answer`) undefined and raises NameError.
    """
    urls = utils.get_urls(
        question
    )  #Sends question to get_urls function in utils.py, which googles query and generates list of suitable urls
    qtype = utils.getqtype(question)  # hoisted: was computed twice
    if qtype == "who":
        names_dict = utils.get_names(urls, question)
    elif qtype == "when":
        names_dict = utils.get_dates(urls)
    # Pick the candidate with the highest vote count.
    number = 0
    for candidate in names_dict:
        if names_dict[candidate] > number:
            answer = candidate
            number = names_dict[candidate]
    return render_template("search.html",
                           question=question,
                           urls=urls,
                           names_dict=names_dict,
                           answer=answer)
コード例 #9
0
ファイル: Dataprovider.py プロジェクト: SoulDuck/Find_Wally
    def __init__(self, imgdir, anns):
        """Index ``*.jpg`` images and parse their center-point annotations.

        Args:
            imgdir: directory containing the images.
            anns: CSV file; header row skipped, each data row is
                ``ctr_x,ctr_y,filename``.
        """
        self.imgdir = imgdir
        self.anns = anns

        self.paths = glob.glob(os.path.join(self.imgdir, '*.jpg'))
        self.names = get_names(self.paths)
        # Read the annotation CSV; `with` closes the handle
        # (the original leaked the open file object).
        with open(self.anns, 'r') as f:
            lines = f.readlines()
        # Skip the header row.
        ctr_xs = []
        ctr_ys = []
        fnames = []
        for line in lines[1:]:
            ctr_x, ctr_y, filename = line.split(',')
            ctr_xs.append(int(ctr_x))
            ctr_ys.append(int(ctr_y))
            fnames.append(filename.strip())
        # Materialize as a list: on Python 3, zip() is a one-shot iterator
        # and would be exhausted after a single pass.
        self.img_infos = list(zip(fnames, ctr_xs, ctr_ys))
コード例 #10
0
                    np.uint8(
                        np.maximum(np.minimum(output[0] * 255.0, 255.0), 0.0)))
                sic.imsave(
                    "%s/%04d/predictions/predictions_%06d.jpg" %
                    (model, epoch, ind),
                    np.uint8(
                        np.maximum(np.minimum(C0_imall[0] * 255.0, 255.0),
                                   0.0)))

                saver.save(sess, "%s/model.ckpt" % model)
            if epoch % 10 == 0:
                saver.save(sess, "%s/%04d/model.ckpt" % (model, epoch))

# Inference
else:
    test_low = utils.get_names(test_dir)
    numtest = len(test_low)
    print(test_low[0])
    out_folder = test_dir.split('/')[-1]
    outputs = [None] * 4
    for ind in range(numtest):
        input_image_src, input_image_target, input_flow_forward_src, input_flow_backward_src = prepare_input_w_flow(
            test_low[ind], num_frames=num_frame, gray=True)
        if input_image_src is None or input_image_target is None or input_flow_forward_src is None:
            print("Not able to read the images/flows.")
            continue
        st = time.time()
        C0_imall, C1_imall, C0_im, C1_im = sess.run(
            [objDict["prediction_0"], objDict["prediction_1"], C0, C1],
            feed_dict={
                input_i: input_image_src,
コード例 #11
0
def loop_request_wrapper(params):
    """Fire one async GET per entry of this worker's parameter slice.

    Args:
        params: the full (index, name) parameter list; `get_my_params`
            selects this process's share based on its pid.
    """
    my_params = get_my_params(params, os.getpid())
    start = datetime.datetime.now()
    # Iterate the pairs directly instead of indexing by range(len(...)).
    urls = [
        get_req_url(options, ("name=%s" % (name, )))
        for _, name in my_params
    ]
    tasks = [asyncio.ensure_future(request(url)) for url in urls]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(tasks))

    end = datetime.datetime.now()
    print('-' * 80)
    print("my pid: %s, parent pid: %s, start at %s, end at %s" %
          (os.getpid(), os.getppid(), start, end))


if __name__ == "__main__":
    print(datetime.datetime.now())
    print("cpu count:", cpu_count)
    pool = multiprocessing.Pool(cpu_count)

    names = get_names(options.task_num)
    params = [(i, names[i]) for i in range(options.task_num)]

    pool.map(loop_request_wrapper, [params for i in range(cpu_count)])
    pool.close()
    pool.join()
    print(datetime.datetime.now())
コード例 #12
0
ファイル: builder.py プロジェクト: chenyfsysu/PythonNote
	def visit_Delete(self, node):
		"""Drop every name deleted by this statement from the local scope."""
		for target in node.targets:
			names = utils.get_names(target, pure_only=False) or []
			self.scope.batchRemoveLocals(names)
		self.genricVisit(node)
コード例 #13
0
ファイル: walker.py プロジェクト: chenyfsysu/PythonNote
 def postvisit_Delete(self, node):
     """Remove names targeted by `del` from scope, then return the node."""
     scope = self.scope
     for target in node.targets:
         deleted = utils.get_names(target, pure_only=False) or []
         scope.batchRemoveLocals(deleted)
     return node
コード例 #14
0
ファイル: walker.py プロジェクト: chenyfsysu/PythonNote
 def postvisit_Assign(self, node):
     """Register every assigned name as a local of the current scope."""
     for target in node.targets:
         assigned = utils.get_names(target, pure_only=False) or []
         for name in assigned:
             self.scope.addLocals(name, node)
コード例 #15
0
ファイル: main.py プロジェクト: yilunchen27/AutoNE
def get_result(dataset_name,
               target_model,
               task,
               kargs,
               sampled_dir='',
               debug=debug,
               cache=cache):
    """Train/evaluate one embedding model on one task, with file caching.

    Args:
        dataset_name: dataset subdirectory under data/, result/, embeddings/.
        target_model: embedding model name; 'gcn' takes a separate path.
        task: 'classification' or 'link_predict'.
        kargs: hyper-parameters forwarded to the model / used in file names.
        sampled_dir: optional sub-sampling subdirectory inside each tree.
        debug: print commands instead of silencing subprocess output.
        cache: reuse existing embedding/result files when they are fresh.

    Returns:
        float: the headline metric read from the result file.
    """
    # Preserve the caller's RNG state; the run itself is seeded with 0 so
    # results are reproducible regardless of the surrounding random stream.
    rs = utils.RandomState()
    rs.save_state()
    rs.set_seed(0)
    embedding_filename = utils.get_names(target_model, **kargs)
    # Result file: 'cf' subtree for classification, 'lp' for link prediction.
    # NOTE(review): any other `task` value leaves `cf` undefined and raises
    # NameError below — confirm callers only pass these two values.
    if task == 'classification':
        cf = os.path.abspath(
            os.path.join('result/{}'.format(dataset_name), sampled_dir, 'cf',
                         embedding_filename))
    elif task == 'link_predict':
        cf = os.path.abspath(
            os.path.join('result/{}'.format(dataset_name), sampled_dir, 'lp',
                         embedding_filename))
    embedding_filename = os.path.abspath(
        os.path.join('embeddings/{}'.format(dataset_name), sampled_dir,
                     embedding_filename))
    dataset_filename = os.path.abspath(
        os.path.join('data/{}'.format(dataset_name), sampled_dir,
                     'graph.edgelist'))
    if target_model != 'gcn':
        # Re-embed when caching is off, the embedding is missing, or the
        # dataset file is newer than the cached embedding.
        if (not cache) or (not os.path.exists(embedding_filename)) or (
                os.path.getmtime(embedding_filename) <
                os.path.getmtime(dataset_filename)):
            utils.run_target_model(target_model,
                                   dataset_filename,
                                   os.path.dirname(embedding_filename),
                                   embedding_test_dir=embedding_test_dir,
                                   debug=debug,
                                   **kargs)
        # Re-evaluate when caching is off, the result is missing, or the
        # embedding is newer than the cached result.
        if (not cache) or (not os.path.exists(cf)) or (
                os.path.getmtime(cf) < os.path.getmtime(embedding_filename)):
            if task == 'classification':
                labels = os.path.abspath(
                    os.path.join(os.path.dirname(dataset_filename),
                                 'label.txt'))
            elif task == 'link_predict':
                # Link prediction receives the dataset directory itself.
                labels = os.path.abspath(
                    os.path.join(os.path.dirname(dataset_filename)))
            utils.run_test(task,
                           dataset_name, [embedding_filename],
                           labels,
                           cf,
                           embedding_test_dir=embedding_test_dir)
    else:
        # GCN is trained/evaluated by shelling out to its own script from
        # inside its source directory.
        if (not cache) or (not os.path.exists(cf)):
            data_path = os.path.abspath(
                os.path.join('data/{}'.format(dataset_name)))
            with utils.cd(
                    os.path.join(embedding_test_dir, 'src/baseline/gcn/gcn')):
                cmd = ('python3 main.py' +\
                        ' --epochs {} --hidden1 {} --learning_rate {}' +\
                        ' --output_filename {} --debug {} --dataset {} --input_dir {}').format(kargs['epochs'], kargs['hidden1'], kargs['learning_rate'], cf, debug, dataset_name, data_path)
                if debug:
                    print(cmd)
                else:
                    # Silence the subprocess when not debugging.
                    cmd += ' > /dev/null 2>&1'
                os.system(cmd)
    # Restore whatever RNG state the caller had before this run.
    rs.load_state()
    res = np.loadtxt(cf, dtype=float)
    # Multi-value result files: the first entry is the headline metric.
    if len(res.shape) != 0:
        res = res[0]
    return res
コード例 #16
0
 def __init__(self, opt):
     """Store the options and enumerate the image names under the base root."""
     self.opt = opt
     self.namelist = utils.get_names(self.opt.baseroot)
        g0 = VCN(utils.build(tf.tile(input_i[:, :, :, 0:1], [1, 1, 1, 3])),
                 reuse=False)
        g1 = VCN(utils.build(tf.tile(input_i[:, :, :, 1:2], [1, 1, 1, 3])),
                 reuse=True)

# Checkpoint saver for the run; keep up to 1000 snapshots around.
saver = tf.train.Saver(max_to_keep=1000)
sess.run([tf.global_variables_initializer()])

# Restore every trainable variable from the latest checkpoint under `model`.
var_restore = [v for v in tf.trainable_variables()]
saver_restore = tf.train.Saver(var_restore)
ckpt = tf.train.get_checkpoint_state(model)
# NOTE(review): ckpt is None when no checkpoint exists, which would raise
# AttributeError on the next line — confirm a checkpoint is always present.
print('loaded ' + ckpt.model_checkpoint_path)
saver_restore.restore(sess, ckpt.model_checkpoint_path)

if not len(test_img):
    img_names = utils.get_names(test_dir)
    ind = 0
    for img_name in img_names:
        im = np.float32(scipy.misc.imread(img_name, 'L')) / 255.0
        h = im.shape[0] // 32 * 32
        w = im.shape[1] // 32 * 32
        im = im[np.newaxis, :h, :w, np.newaxis]
        st = time.time()
        output = sess.run(
            g0, feed_dict={input_i: np.concatenate((im, im), axis=3)})
        print("test time for %s --> %.3f" % (ind, time.time() - st))
        folder = test_dir.split('/')[-1]
        if not os.path.isdir("%s/%s" % (model, folder)):
            for idx in range(5):
                os.makedirs("%s/%s/result%d" % (model, folder, idx))
        out_all = np.concatenate(
コード例 #18
0
    def resize_npImages(self, np_imgs, resize):
        """Resize every image to `resize`, converting each to RGB on the way.

        Progress is reported via utils.show_progress for each image.

        Returns:
            A single numpy array stacking the resized images.
        """
        resized = []
        total = len(np_imgs)
        for idx, src in enumerate(np_imgs):
            utils.show_progress(idx, total)
            rgb = Image.fromarray(src).convert('RGB').resize(resize)
            resized.append(np.asarray(rgb))
        return np.asarray(resized)


if __name__ == '__main__':

    # Build the foreground ('Wally') train/val/test arrays, save them, and
    # then start collecting background crop paths.
    paths = glob.glob(os.path.join('foreground', 'original_fg', '*'))
    names = get_names(paths)

    # Foreground
    img_processing = ImageProcessing()
    # Load Image from paths
    imgs = img_processing.paths2imgs(paths, (64, 64))
    print 'foreground images : {}'.format(np.shape(imgs))
    #
    # Split into train/val/test; the two 0.1 arguments are presumably the
    # validation and test fractions — TODO confirm divide_TVT's contract.
    fg_train_imgs, fg_val_imgs, fg_test_imgs = img_processing.divide_TVT(
        imgs, 0.1, 0.1)
    np.save('fg_train.npy', fg_train_imgs)
    np.save('fg_test.npy', fg_test_imgs)
    np.save('fg_val.npy', fg_val_imgs)
    # Background
    paths = np.asarray(glob.glob(os.path.join('background', 'cropped_bg',
                                              '*')))
コード例 #19
0
# Restore all trainable variables from the pretrained checkpoint.
var_restore = [v for v in tf.trainable_variables()]
saver_restore = tf.train.Saver(var_restore)
ckpt = tf.train.get_checkpoint_state("pretrained_models/" + model)
print('loaded ' + ckpt.model_checkpoint_path)
saver_restore.restore(sess, ckpt.model_checkpoint_path)

folder = video_path.split('/')[-1]
# Output next to the input: "<video_path>_colorized" (trailing "/" stripped).
output_dir = ARGS.video_path[:-1] + "_colorized" if ARGS.video_path.endswith(
    "/") else ARGS.video_path + "_colorized"

# if ARGS.output_dir is None:
#     output_dir = "test_result/{}/{}".format(model, folder)
# else:
#     output_dir = ARGS.output_dir
if img_path is None:
    # No single image given: run the network over every frame under
    # video_path.
    img_names = utils.get_names(video_path)
    ind = 0
    for frame_id, img_name in enumerate(img_names):
        # Grayscale frame scaled to [0, 1].
        im = np.array(Image.open(img_name).convert("L")) / 255.
        # Crop H and W down to multiples of 32 — presumably required by the
        # network's downsampling factor; TODO confirm.
        h = im.shape[0] // 32 * 32
        w = im.shape[1] // 32 * 32
        # h = 320
        # w = 320

        # Add batch and channel axes: (1, h, w, 1).
        im = im[np.newaxis, :h, :w, np.newaxis]

        # print(im.shape)

        st = time.time()
        # Duplicate the frame along the channel axis for the 2-channel input.
        output = sess.run(
            g0, feed_dict={input_i: np.concatenate((im, im), axis=3)})
コード例 #20
0
ファイル: app.py プロジェクト: zeniroy/codenamu2014
def home():
    """Render the home page with the list of registered id/name pairs."""
    id_names = get_names()
    return render_template('home.html', id_names=id_names)
コード例 #21
0

def loop_request_wrapper(params):
    """Issue `options.task_num` async GETs carrying this worker's name.

    Args:
        params: an (index, name) tuple; only the name goes into the URL.
    """
    start = datetime.datetime.now()
    # BUGFIX: the format string was "name=" with no placeholder, so
    # .format(params[1]) silently dropped the name from every URL.
    urls = [
        get_req_url(options, "name={}".format(params[1]))
        for _ in range(options.task_num)
    ]
    tasks = [asyncio.ensure_future(request(url)) for url in urls]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(tasks))
    end = datetime.datetime.now()
    print("visit %s" % (urls))
    print("my pid: %s, parent pid: %s, start at %s, end at %s" %
          (os.getpid(), os.getppid(), start, end))
    print('-' * 80)


if __name__ == "__main__":
    print(datetime.datetime.now())
    cpu_count = multiprocessing.cpu_count()
    print("cpu count:", cpu_count)
    pool = multiprocessing.Pool(cpu_count)

    names = get_names(options.process_num)
    params = [(i, names[i]) for i in range(options.process_num)]
    pool.map(loop_request_wrapper, params)
    pool.close()
    pool.join()
    print(datetime.datetime.now())
コード例 #22
0
    def random_char(self, printable=True):
        """Generate a random character sheet.

        Args:
            printable: if True, return a human-readable text sheet;
                otherwise return a hand-assembled JSON string.

        Returns:
            str: the formatted character sheet.
        """
        NewChar = self.random_name()
        given, sur = utils.get_names(NewChar)

        gender = self.gender_from_name(NewChar)

        # Names marked "either" get a coin-flip gender.
        if gender == "either":
            gender = random.choice(["male", "female"])

        if gender == "male":
            opronoun = "his"
            pronoun = "he"
        else:
            pronoun = "she"
            opronoun = "her"

        # Class / background / stat package is keyed off the surname.
        PackageClass, PackageBG, PackageStr, PackageInt, PackageWis, PackageCon, PackageDex, PackageCha, PrimaryStat = self._get_package(
            sur)
        # Fill the background template; the '. $pronoun' / '. $opronoun'
        # forms are replaced before the bare ones so that sentence-initial
        # pronouns come out capitalized.
        bgstr = "%s the %s %s %s\n" % (
            NewChar, PackageClass, utils.background_from(),
            PackageBG.replace(
                '$name', NewChar).replace('$gender', gender).replace(
                    '. $pronoun', '. ' +
                    pronoun.capitalize()).replace('$pronoun', pronoun).replace(
                        '. $opronoun', '. ' + opronoun.capitalize()).replace(
                            '$opronoun', opronoun))
        gold = utils.gen_stat(0) * 10 * self.currency_exchange
        if printable:
            # Plain-text sheet.
            returnval = "Name: %s\n" % NewChar
            returnval += "Gender: %7s    Class: %s    Primary: %s\n" % (
                gender, PackageClass, PrimaryStat)

            returnval += self._print_two_stats('str', PackageStr, 'int',
                                               PackageInt, PrimaryStat)
            returnval += self._print_two_stats('wis', PackageWis, 'con',
                                               PackageCon, PrimaryStat)
            returnval += self._print_two_stats('dex', PackageDex, 'cha',
                                               PackageCha, PrimaryStat)
            returnval += "Starting gold (based on %s exchange): %s\n" % (
                self.currency, gold)
            returnval += "----------------------\n"
            returnval += bgstr
            return returnval
        # JSON sheet, assembled by hand.
        # NOTE(review): values are not JSON-escaped; a quote or backslash in
        # the name/background would produce invalid JSON — consider json.dumps.
        returnval = "{\n"
        returnval += "  \"character\": {\n"
        returnval += "    \"name\": \"%s\",\n" % NewChar
        returnval += "    \"gender\": \"%s\",\n" % gender
        returnval += "    \"class\": \"%s\",\n" % PackageClass
        returnval += "    \"primary_stat\": \"%s\",\n" % PrimaryStat
        returnval += "    \"starting_gold\": \"%f\",\n" % gold
        returnval += "    \"background\": \"%s\",\n" % bgstr
        returnval += "    \"statblock\": {\n"
        returnval += "       \"stat\": [\n"
        returnval += self._json_stat('str', PackageStr) + ",\n"
        returnval += self._json_stat('int', PackageInt) + ",\n"
        returnval += self._json_stat('wis', PackageWis) + ",\n"
        returnval += self._json_stat('con', PackageCon) + ",\n"
        returnval += self._json_stat('dex', PackageDex) + ",\n"
        returnval += self._json_stat('cha', PackageCha) + "\n"
        returnval += "       ]\n"
        returnval += "    }\n"
        returnval += "  }\n"
        returnval += "}\n"
        return returnval
コード例 #23
0
    def gender_from_name(self, name):
        """Look up the gender recorded for `name`'s given name.

        Raises ValueError (as `.index` does) when the given name is unknown.
        """
        given, _sur = utils.get_names(name)
        idx = self._given_names.index(given.lower())
        return self._given_names_gender[idx]