Example #1
def setup_dir():
    params = utils.read_params()
    DIR = params["DIRS"]
    for d in DIR.values():
        utils.make_dir(d)

    utils.check_params_json("params.json")
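All of these examples lean on a small utils.make_dir helper. A minimal sketch of what that helper and the check_params_json call above might look like, assuming make_dir simply wraps os.makedirs and check_params_json writes an empty JSON object when the file is missing (both bodies are guesses, not the project's actual code):

import json
import os


def make_dir(path):
    # create the directory (and any parents) if it does not already exist
    os.makedirs(path, exist_ok=True)


def check_params_json(json_path):
    # assumption: write an empty JSON object if the params file is missing
    if not os.path.exists(json_path):
        with open(json_path, "w") as f:
            json.dump({}, f)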
Example #2
def setup_dir():  # create the directories the project needs
    params = utils.read_params()  # read the hyperparameters
    DIR = params["DIRS"]  # the DIRS dict from the params
    for d in DIR.values():  # iterate over the configured directories
        utils.make_dir(d)  # create each one

    utils.check_params_json("params.json")  # make sure params.json exists; write an empty one if not
Example #3
def evaluate_all(gt_file_dir, gt_img_dir, ckpt_path, gpuid='0'):
    db = DB(ckpt_path, gpuid)

    img_list = os.listdir(gt_img_dir)

    show = './eva'
    make_dir(show)

    total_TP = 0
    total_gt_care_num = 0
    total_pred_care_num = 0
    for img_name in tqdm.tqdm(img_list):
        img = cv2.imread(os.path.join(gt_img_dir, img_name))

        pred_box_list, pred_score_list, _ = db.detect_img(os.path.join(
            gt_img_dir, img_name),
                                                          ispoly=True,
                                                          show_res=False)

        gt_file_name = os.path.splitext(img_name)[0] + '.txt'

        gt_boxes, tags = load_ctw1500_labels(
            os.path.join(gt_file_dir, gt_file_name))

        gt_care_list = []
        gt_dontcare_list = []

        for i, box in enumerate(gt_boxes):
            box = box.reshape((-1, 2)).tolist()
            if not tags[i]:  # the tag marks a "don't care" region
                gt_care_list.append(box)
            else:
                gt_dontcare_list.append(box)

        precision, recall, f1_score, TP, gt_care_num, pred_care_num, pairs_list = evaluate(
            gt_care_list, gt_dontcare_list, pred_box_list, overlap=0.5)

        for pair in pairs_list:
            cv2.polylines(img,
                          [np.array(pair['gt'], np.int32).reshape([-1, 1, 2])],
                          True, (0, 255, 0))
            cv2.polylines(img,
                          [np.array(pair['pred'], np.int32).reshape([-1, 1, 2])],
                          True, (255, 0, 0))

        cv2.imwrite(os.path.join(show, img_name), img)

        total_TP += TP
        total_gt_care_num += gt_care_num
        total_pred_care_num += pred_care_num

    total_precision = float(total_TP) / total_pred_care_num
    total_recall = float(total_TP) / total_gt_care_num
    total_f1_score = compute_f1_score(total_precision, total_recall)

    return total_precision, total_recall, total_f1_score
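compute_f1_score is not shown here; assuming it is the usual harmonic mean of precision and recall, a sketch would be:

def compute_f1_score(precision, recall):
    # harmonic mean of precision and recall; guard against division by zero
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)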
Example #4
    def step(self, data, label, step_type):
        utils.make_dir(self.MODEL_DIR)  # create the model directory if it does not exist
        cur_dir = self.get_cur_epoch_dir()  # save directory for the current epoch
        data_npy, label_npy = utils.load_npy(data), utils.load_npy(
            label)  # load the previously saved data and labels
        data_npy = data_npy[:, ::24]
        feed_dict = {
            self.X: data_npy,
            self.Y_onehot: label_npy
        }  # feed the data into the graph

        if step_type == "train":  # training step
            fetches = [
                self.apply_grad, self.loss, self.summary_op, self.print,
                self.step_count, self.metrics_op
            ]
            out = self.sess.run(fetches, feed_dict)
            loss, summary, step_count = out[1], out[2], out[4]

            self.train_writer.add_summary(summary, global_step=step_count)
        elif step_type == "debug":
            fetchs = [self.apply_grad]
            options = tf.RunOptions(trace_level=3)
            run_metadata = tf.RunMetadata()
            out = self.sess.run(fetches,
                                feed_dict,
                                options=options,
                                run_metadata=run_metadata)
        else:
            fetches = [
                self.softmax, self.loss, self.summary_op, self.print_iou,
                self.step_count, self.metrics_op
            ]
            out = self.sess.run(fetches, feed_dict)
            softmax, loss, summary, step_count = out[0], out[1], out[2], out[4]

            if step_type == "val":
                self.val_writer.add_summary(summary, global_step=step_count)
            elif step_type == "test":
                self.test_writer.add_summary(summary, global_step=step_count)

            # display the result of each element of the validation batch
            if self.params["VIS"]["VALIDATION_STEP"]:
                i = random.randint(0, len(data_npy) - 1)
                x, y, yp = data_npy[i], label_npy[i], softmax[i]
                name = "{}\\{}_{}".format(cur_dir, step_count,
                                          utils.get_file_name(data[i])[0:-2])
                vis.img_sequence(x, "{}_x.png".format(name))
                vis.voxel_binary(y, "{}_y.png".format(name))
                vis.voxel_binary(yp, "{}_yp.png".format(name))

        return loss
Example #5
def write_renders_to_disk(mesh, renders, render_count=10):
    print("[write_renders_to_disk] writing renders to {0} ... ".format(
        renders))
    # FIXME: stupid but clean
    os.system("rm -rf {}".format(renders))
    utils.make_dir(renders)
    scene = mesh.scene()
    for i in range(render_count):
        angle = math.radians(random.randint(15, 30))
        axis = random.choice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        rotate = trimesh.transformations.rotation_matrix(
            angle, axis, scene.centroid)
        camera_old, _geometry = scene.graph['camera']
        camera_new = np.dot(camera_old, rotate)
        scene.graph['camera'] = camera_new
        # backfaces culled if using original trimesh package
        scene.save_image(
            '{0}/{1}_{2}.png'.format(renders, os.path.basename(renders), i), resolution=(127, 127))

    return
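The FIXME above shells out to rm -rf, which is Unix-only. A portable stand-in for those two lines, using the standard library, might be:

import shutil

shutil.rmtree(renders, ignore_errors=True)  # remove the old render tree, if any
utils.make_dir(renders)                     # then recreate it empty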
Example #6
    def read_params(self, params=None):  # read the hyperparameters
        if params is None:  # no params given,
            self.params = utils.read_params()  # so read the defaults
        else:
            self.params = params  # use the params that were given

        if self.params["TRAIN"]["INITIALIZER"] == "XAVIER":
            # Xavier initialization keeps each layer's output variance roughly equal
            self.init = tf.contrib.layers.xavier_initializer()
        else:
            # otherwise initialize from a standard normal distribution
            self.init = tf.random_normal_initializer()

        self.CREATE_TIME = datetime.now().strftime(
            "%Y-%m-%d_%H.%M.%S")  # creation timestamp, stored as a string
        self.MODEL_DIR = "{}\\model_{}".format(
            self.params["DIRS"]["MODELS_LOCAL"], self.CREATE_TIME)  # directory to create
        utils.make_dir(self.MODEL_DIR)  # create it

        with open(self.MODEL_DIR + '\\params.json', 'w') as f:
            json.dump(self.params, f)  # write the params out as JSON
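This variant hard-codes Windows separators ("\\"), while Example #16 below builds the same paths with "/". A separator-agnostic version of the path construction, using os.path.join (the function wrapper is just for illustration; the names mirror the snippet):

import os


def model_paths(params, create_time):
    # build the model directory and params.json paths portably
    model_dir = os.path.join(params["DIRS"]["MODELS_LOCAL"],
                             "model_{}".format(create_time))
    return model_dir, os.path.join(model_dir, "params.json")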
Example #7
 def save_ranking_artworks(self, dir_path, order='popular-1-month', type='visual-art', content='all', category='all', limit=30):
     print(f'download {order} {content} {category} ranking\n')
     dir_path = utils.make_dir(dir_path, f'{order} {content} {category} ranking')
     artworks = self.ranking_artworks(order, type, content, category, limit, dir_path)
     if not artworks:
         print(f'{order} {content} {category} ranking is up-to-date\n')
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f'\ndownload for {order} {content} {category} ranking completed\n')
     combined_files = utils.counter(files)
     utils.set_files_mtime(combined_files['name'], dir_path)
     return combined_files
Example #8
 def save_user_artworks(self, user_id, dir_path):
     print(f'download artworks for user {user_id}\n')
     dir_path = utils.make_dir(dir_path, user_id)
     artworks = self.user_artworks(user_id, dir_path)
     if not artworks:
         print(f'user {user_id} is up-to-date\n')
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f'\ndownload for user {user_id} completed\n')
     combined_files = utils.counter(files)
     utils.set_files_mtime(combined_files['name'], dir_path)
     return combined_files
Example #9
 def save_rankings(self, mode, content, date, limit, dir_path):
     print(f"download {mode} {content} rankings\n")
     dir_path = utils.make_dir(dir_path, f"{mode} {content} rankings")
     artworks = self.rankings_artworks(mode, content, date, limit, dir_path)
     if not artworks:
         print(f"{mode} {content} rankings are up-to-date\n")
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f"\ndownload for {mode} {content} rankings completed\n")
     combined_files = utils.dict_counter(files)
     utils.set_files_mtime(combined_files["names"], dir_path)
     return combined_files
Example #10
 def save_bookmarks(self, user_id, dir_path):
     username = self.user(user_id)["name"]
     print(f"download bookmarks for user {username}\n")
     dir_path = utils.make_dir(dir_path, str(user_id) + " bookmarks")
     artworks = self.user_bookmarks_artworks(user_id, dir_path)
     if not artworks:
         print(f"user {username} is up-to-date\n")
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f"\ndownload for user {username} completed\n")
     combined_files = utils.dict_counter(files)
     utils.set_files_mtime(combined_files["names"], dir_path)
     return combined_files
Example #11
 def save_artist(self, artist_id, dir_path):
     artist_name = self.artist(artist_id)["name"]
     print(f"download for artist {artist_name} begins\n")
     dir_path = utils.make_dir(dir_path, artist_id)
     artworks = self.artist_artworks(artist_id, dir_path)
     if not artworks:
         print(f"artist {artist_name} is up-to-date\n")
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f"\ndownload for artist {artist_name} completed\n")
     combined_files = utils.counter(files)
     utils.file_mtimes(combined_files["names"], dir_path)
     return combined_files
Example #12
 def save_artist(self, artist_id, dir_path, stop=None):
     gallery = self.gallery(artist_id)
     artist_name = gallery["artist_name"]
     print(f"download for author {artist_name} begins\n")
     dir_path = utils.make_dir(dir_path, artist_name)
     artwork_urls = self.artist_artworks(artist_id, stop)
     if not artwork_urls:
         print(f"author {artist_name} is up-to-date\n")
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artwork_urls)
     print(f"\ndownload for author {artist_name} completed\n")
     combined_files = utils.counter(files)
     utils.set_files_mtime(combined_files["name"], dir_path)
     return combined_files
Example #13
 def save_collection_artworks(self, collection, dir_path):
     collection_metadata = self.collection_metadata(collection)
     collection_name = collection_metadata["name"]
     collection_owner = collection_metadata["owner"]["username"]
     amount = collection_metadata["size"]
     print(
         f'download {amount} artworks for collection {collection_name} by {collection_owner}\n'
     )
     dir_path = utils.make_dir(dir_path, collection_name)
     artworks = self.collection_artworks(collection, dir_path)
     if not artworks:
         print(f'collection {collection_name} is up-to-date\n')
         return
     with ThreadPool(self.threads) as pool:
         files = pool.map(partial(self.save_artwork, dir_path), artworks)
     print(f'\ndownload for collection {collection_name} completed\n')
     combined_files = utils.counter(files)
     utils.set_files_mtime(combined_files['name'], dir_path)
     return combined_files
Example #14
 def create_epoch_dir(self):
     cur_ind = self.epoch_index()
     save_dir = os.path.join(self.MODEL_DIR, "epoch_{}".format(cur_ind + 1))
     utils.make_dir(save_dir)
     return save_dir
Example #15
 def get_params(self):
     utils.make_dir(self.MODEL_DIR)
     with open(self.MODEL_DIR + "/params.json") as fp:
         return json.load(fp)
Example #16
    def __init__(self, params=None):
        # read params
        if params is None:
            self.params = utils.read_params()
        else:
            self.params = params

        if self.params["TRAIN"]["INITIALIZER"] == "XAVIER":
            init = tf.contrib.layers.xavier_initializer()
        else:
            init = tf.random_normal_initializer()

        self.CREATE_TIME = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        self.MODEL_DIR = "{}/model_{}".format(
            self.params["DIRS"]["MODELS_LOCAL"], self.CREATE_TIME)
        utils.make_dir(self.MODEL_DIR)

        with open(self.MODEL_DIR + '/params.json', 'w') as f:
            json.dump(self.params, f)

        # place holders
        with tf.name_scope("Data"):
            self.X = tf.placeholder(tf.float32, [None, None, None, None, None])
        with tf.name_scope("Labels"):
            self.Y_onehot = tf.placeholder(tf.float32, [None, 32, 32, 32, 2])

        pp = preprocessor.Preprocessor(self.X)
        X_preprocessed = pp.out_tensor
        n_batchsize = tf.shape(X_preprocessed)[0]

        # encoder
        print("encoder")
        if self.params["TRAIN"]["ENCODER_MODE"] == "DILATED":
            en = encoder.Dilated_Encoder(X_preprocessed)
        elif self.params["TRAIN"]["ENCODER_MODE"] == "RESIDUAL":
            en = encoder.Residual_Encoder(X_preprocessed)
        else:
            en = encoder.Simple_Encoder(X_preprocessed)
        encoded_input = en.out_tensor

        # visualize transformation of input state to voxel
        if self.params["VIS"]["ENCODER_PROCESS"]:
            with tf.name_scope("misc"):
                feature_maps = tf.get_collection("feature_maps")
                fm_list = []
                for fm in feature_maps:
                    fm_slice = fm[0, 0, :, :, 0]
                    fm_shape = fm_slice.get_shape().as_list()
                    fm_slice = tf.pad(fm_slice,
                                      [[0, 0], [127 - fm_shape[0], 0]])
                    fm_list.append(fm_slice)
                fm_img = tf.concat(fm_list, axis=0)
                tf.summary.image("feature_map_list",
                                 tf.expand_dims(tf.expand_dims(fm_img, -1), 0))

        # recurrent_module
        print("recurrent_module")
        with tf.name_scope("Recurrent_module"):
            rnn_mode = self.params["TRAIN"]["RNN_MODE"]
            n_cell = self.params["TRAIN"]["RNN_CELL_NUM"]
            n_hidden = self.params["TRAIN"]["RNN_HIDDEN_SIZE"]

            if rnn_mode == "LSTM":
                rnn = recurrent_module.LSTM_Grid(initializer=init)
                hidden_state = (
                    tf.zeros([n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                             name="zero_hidden_state"),
                    tf.zeros([n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                             name="zero_cell_state"))
            else:
                rnn = recurrent_module.GRU_Grid(initializer=init)
                hidden_state = tf.zeros(
                    [n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                    name="zero_hidden_state")

            n_timesteps = self.params["TRAIN"]["TIME_STEP_COUNT"]
            # feed a fixed-length sequence of images
            if isinstance(n_timesteps, int) and n_timesteps > 0:
                for t in range(n_timesteps):
                    hidden_state = rnn.call(encoded_input[:, t, :],
                                            hidden_state)
            else:  # feed an arbitrary-length sequence of images
                n_timesteps = tf.shape(X_preprocessed)[1]

                t = tf.constant(0)

                def condition(h, t):
                    return tf.less(t, n_timesteps)

                def body(h, t):
                    h = rnn.call(encoded_input[:, t, :], h)
                    t = tf.add(t, 1)
                    return h, t

                hidden_state, t = tf.while_loop(condition, body,
                                                (hidden_state, t))

        # decoder
        print("decoder")
        if isinstance(hidden_state, tuple):
            hidden_state = hidden_state[0]
        if self.params["TRAIN"]["DECODER_MODE"] == "DILATED":
            de = decoder.Dilated_Decoder(hidden_state)
        elif self.params["TRAIN"]["DECODER_MODE"] == "RESIDUAL":
            de = decoder.Residual_Decoder(hidden_state)
        else:
            de = decoder.Simple_Decoder(hidden_state)
        self.logits = de.out_tensor

        # visualize transformation of hidden state to voxel
        if self.params["VIS"]["DECODER_PROCESS"]:
            with tf.name_scope("misc"):
                feature_voxels = tf.get_collection("feature_voxels")
                fv_list = []
                for fv in feature_voxels:
                    fv_slice = fv[0, :, :, 0, 0]
                    fv_shape = fv_slice.get_shape().as_list()
                    fv_slice = tf.pad(fv_slice,
                                      [[0, 0], [32 - fv_shape[0], 0]])
                    fv_list.append(fv_slice)
                fv_img = tf.concat(fv_list, axis=0)
                tf.summary.image("feature_voxel_list",
                                 tf.expand_dims(tf.expand_dims(fv_img, -1), 0))

        # loss
        print("loss")
        voxel_loss = loss.Voxel_Softmax(self.Y_onehot, self.logits)
        self.loss = voxel_loss.loss
        self.softmax = voxel_loss.softmax
        tf.summary.scalar("loss", self.loss)

        # misc
        print("misc")
        with tf.name_scope("misc"):
            self.step_count = tf.Variable(0,
                                          trainable=False,
                                          name="step_count")
            self.print = tf.Print(self.loss, [self.step_count, self.loss, t])

        # optimizer
        print("optimizer")
        if self.params["TRAIN"]["OPTIMIZER"] == "ADAM":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.params["TRAIN"]["ADAM_LEARN_RATE"],
                epsilon=self.params["TRAIN"]["ADAM_EPSILON"])
            tf.summary.scalar("adam_learning_rate", optimizer._lr)
        else:
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.params["TRAIN"]["GD_LEARN_RATE"])
            tf.summary.scalar("learning_rate", optimizer._learning_rate)

        grads_and_vars = optimizer.compute_gradients(self.loss)
        self.apply_grad = optimizer.apply_gradients(
            grads_and_vars, global_step=self.step_count)

        # metric
        print("metrics")
        with tf.name_scope("metrics"):
            Y = tf.argmax(self.Y_onehot, -1)
            predictions = tf.argmax(self.softmax, -1)
            acc, acc_op = tf.metrics.accuracy(Y, predictions)
            rms, rms_op = tf.metrics.root_mean_squared_error(
                self.Y_onehot, self.softmax)
            iou, iou_op = tf.metrics.mean_iou(Y, predictions, 2)
            self.metrics_op = tf.group(acc_op, rms_op, iou_op)

        tf.summary.scalar("accuracy", acc)
        tf.summary.scalar("rmse", rms)
        tf.summary.scalar("iou", iou)

        # initialize
        # config=tf.ConfigProto(log_device_placement=True)
        print("setup")
        self.summary_op = tf.summary.merge_all()
        self.sess = tf.InteractiveSession()
        if self.params["MODE"] == "DEBUG":
            self.sess = tf_debug.TensorBoardDebugWrapperSession(
                self.sess,
                "nat-oitwireless-inside-vapornet100-c-15126.Princeton.EDU:6064"
            )

        # summaries
        print("summaries")
        if self.params["MODE"] == "TEST":
            self.test_writer = tf.summary.FileWriter(
                "{}/test".format(self.MODEL_DIR), self.sess.graph)
        else:
            self.train_writer = tf.summary.FileWriter(
                "{}/train".format(self.MODEL_DIR), self.sess.graph)
            self.val_writer = tf.summary.FileWriter(
                "{}/val".format(self.MODEL_DIR), self.sess.graph)

        # initialize
        print("initialize")
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        print("...done!")
Example #17
 def get_params(self):
     utils.make_dir(self.MODEL_DIR)  # create the directory if it does not exist
     with open(self.MODEL_DIR + "\\params.json") as fp:  # open the JSON file
         return json.load(fp)  # load and return the params
Example #18
import cv2
import os
import glob
import sys
from lib.utils import make_dir
from lib.get_position import get_position

# =============================================================
src_dir = 'source'
dst_dir = 'output'
done_dir = 'done'
record_file = 'destination/locations.txt'

make_dir(dst_dir)
make_dir(done_dir)

# =============================================================
for src_img_path in glob.glob(src_dir + "/*/*"):
    # Creates file paths for source, destination and done images
    path_list = src_img_path.split(os.sep)
    path_list[0] = dst_dir
    dst_img_path = os.sep.join(path_list)
    path_list[0] = done_dir
    done_img_path = os.sep.join(path_list)

    os.makedirs(os.path.dirname(dst_img_path), exist_ok=True)
    os.makedirs(os.path.dirname(done_img_path), exist_ok=True)

    sub_dir = path_list[1]
    pig_img_name = path_list[-1]
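The split/join above simply swaps the first path component to build the matching destination and done paths; a worked example (the concrete path is illustrative):

# 'source/pen_3/pig_01.jpg'.split(os.sep) -> ['source', 'pen_3', 'pig_01.jpg']
# with path_list[0] = dst_dir,  os.sep.join gives 'output/pen_3/pig_01.jpg'
# with path_list[0] = done_dir, os.sep.join gives 'done/pen_3/pig_01.jpg'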
Example #19
import cv2
import os
from lib.utils import make_dir
from lib.get_position import get_position

# =============================================================
src_dir = 'source_2_level/'
dst_dir = 'dst/'
done_dir = 'done/'
record_file = 'record_2_level.txt'

make_dir(dst_dir)
make_dir(done_dir)

# =============================================================
sub_dir_list = os.listdir(src_dir)
for pig_id in range(len(sub_dir_list)):
    pig_dst_dir = os.path.join(dst_dir, str(pig_id + 1))
    pig_done_dir = os.path.join(done_dir, str(pig_id + 1))

    make_dir(pig_dst_dir)
    make_dir(pig_done_dir)

print('=' * 50)
# =============================================================

for sub_dir in sub_dir_list:
    sub_src_dir = os.path.join(src_dir, str(sub_dir))
    sub_dst_dir = os.path.join(dst_dir, str(sub_dir))
    sub_done_dir = os.path.join(done_dir, str(sub_dir))