Code example #1
0
"""Test-stage setup for a GCN segmentation model (TensorFlow 1.x):
load the test split, set hyperparameters, and build the model graph."""
import os  # FIX: os.path.join is used below but `os` was never imported

import numpy as np
import tensorflow as tf
from datasets import data as dataset
from models.nn import GCN as ConvNet
from learning.evaluators import AccuracyEvaluator as Evaluator
from learning.utils import draw_pixel

""" 1. Load dataset """
root_dir = os.path.join('data/catdog/')  # FIXME: point this at the real data root
test_dir = os.path.join(root_dir, 'test')

# Set image size and number of classes
IM_SIZE = (512, 512)
NUM_CLASSES = 3

# Load test set
X_test, y_test = dataset.read_data(test_dir, IM_SIZE)
test_set = dataset.DataSet(X_test, y_test)

""" 2. Set test hyperparameters """
hp_d = dict()

# FIXME: Test hyperparameters
hp_d['batch_size'] = 8

""" 3. Build graph, load weights, initialize a session and start test """
# Initialize
graph = tf.get_default_graph()
config = tf.ConfigProto()
# Allocate GPU memory on demand instead of grabbing it all up front.
config.gpu_options.allow_growth = True

# Model expects HWC input: [height, width, channels].
model = ConvNet([IM_SIZE[0], IM_SIZE[1], 3], NUM_CLASSES, **hp_d)
evaluator = Evaluator()
saver = tf.train.Saver()
Code example #2
0
"""Test-stage setup for a YOLO face detector: load the unlabeled test split,
sanity-check it, and assemble the test hyperparameters."""
import os

import cv2
import numpy as np
import tensorflow as tf

from datasets import data as dataset
from learning.utils import draw_pred_boxes, predict_nms_boxes, convert_boxes
from models.nn import YOLO as ConvNet

""" 1. Load dataset """
root_dir = os.path.join('data/face')
test_dir = os.path.join(root_dir, 'test')

IM_SIZE = (416, 416)
NUM_CLASS = 1

# Read test images; no_label=True because this split carries no annotations.
X_test, y_test = dataset.read_data(test_dir, IM_SIZE, no_label=True)
test_set = dataset.DataSet(X_test, y_test)

# Quick sanity check on what was loaded.
print('Test set stats:')
print(test_set.images.shape)
print(test_set.images.min(), test_set.images.max())

""" 2. Set test hyperparameters """
# image_mean = np.load('/tmp/data_mean.npy')  # optional mean subtraction
anchors = dataset.load_json(os.path.join(test_dir, 'anchors.json'))
class_map = dataset.load_json(os.path.join(test_dir, 'classes.json'))
nms_flag = True

hp_d = {
    # 'image_mean': image_mean,
    'batch_size': 16,
    'nms_flag': nms_flag,
}
Code example #3
0
File: train.py  Project: sunnys-lab/tf-segmentation
"""Training setup for a GCN segmentation model: load and split the
trainval set, then assemble the training hyperparameters."""
import os  # FIX: os.path.join is used below but `os` was never imported

from datasets import data as dataset
from models.nn import GCN as ConvNet
# from learning.optimizers import AdamOptimizer as Optimizer
from learning.optimizers import MomentumOptimizer as Optimizer
from learning.evaluators import AccuracyEvaluator as Evaluator

""" 1. Load and split datasets """
root_dir = os.path.join('data/catdog/') # FIXME
trainval_dir = os.path.join(root_dir, 'train')

# Set image size and number of classes
IM_SIZE = (512, 512)
NUM_CLASSES = 3

# Load trainval set and split into train/val sets:
# the first 10% of samples become the validation set.
X_trainval, y_trainval = dataset.read_data(trainval_dir, IM_SIZE)
trainval_size = X_trainval.shape[0]
val_size = int(trainval_size * 0.1) # FIXME
val_set = dataset.DataSet(X_trainval[:val_size], y_trainval[:val_size])
train_set = dataset.DataSet(X_trainval[val_size:], y_trainval[val_size:])

""" 2. Set training hyperparameters"""
hp_d = dict()

# FIXME: Training hyperparameters
hp_d['batch_size'] = 8
hp_d['num_epochs'] = 100
hp_d['init_learning_rate'] = 1e-3
hp_d['momentum'] = 0.9
hp_d['learning_rate_patience'] = 10  # epochs without val improvement before decay
hp_d['learning_rate_decay'] = 0.1
Code example #4
0
"""Training setup for a DCGAN on FFHQ thumbnails: load the image set
and assemble the training hyperparameters (unsupervised — no labels)."""
import os  # FIX: os.path.join is used below but `os` was never imported

from datasets import data as dataset
from models.nn import DCGAN as GAN
# from learning.optimizers import AdamOptimizer as Optimizer
from learning.optimizers import MomentumOptimizer as Optimizer
from learning.evaluators import FIDEvaluator as Evaluator

""" 1. Load and split datasets """
root_dir = os.path.join('data/FFHQ/')  # FIXME
# root_dir = os.path.join('data/celeba-dataset/img_align_celeba') # FIXME
trainval_dir = os.path.join(root_dir, 'thumbnails128x128')
# trainval_dir = os.path.join(root_dir, 'img_align_celeba')

# Set image size (no class count: GAN training is unlabeled)
IM_SIZE = (64, 64)

# Load trainval set; third argument (108) presumably a center-crop size — TODO confirm
X_trainval = dataset.read_data(trainval_dir, IM_SIZE, 108)
trainval_size = X_trainval.shape[0]
# NOTE(review): `Dataset` differs from the `DataSet` spelling used in the
# other examples — verify which class name the datasets module actually exports.
train_set = dataset.Dataset(X_trainval)
print(train_set.num_examples)

""" 2. Set training hyperparameters"""
hp_d = dict()

save_dir = './DCGAN_training_FFHQ_z_90_linear_02/'

# FIXME: Training hyperparameters
hp_d['batch_size'] = 64
hp_d['num_epochs'] = 45
hp_d['init_learning_rate'] = 2e-4
hp_d['momentum'] = 0.5
hp_d['learning_rate_patience'] = 10
hp_d['learning_rate_decay'] = 1.0  # decay factor 1.0 -> learning rate never decays
Code example #5
0
"""Test-stage setup for a YOLOv2 face detector (PyTorch-style: .pth
checkpoint, .cuda()): load the test split and build the model."""
import os  # FIX: os.path.join is used below but `os` was never imported

import numpy as np
from datasets import data as dataset
from models.yolov2 import YOLO as ConvNet
from learning.evaluators import RecallEvaluator as Evaluator
from learning.utils import predict_nms_boxes, convert_boxes
from utils.visualization import draw_pred_boxes
import cv2
import glob

""" 1. Load dataset """
root_dir = os.path.join('data/face')
test_dir = os.path.join(root_dir, 'test')
IM_SIZE = (416, 416)
NUM_CLASSES = 1

# Load test set in channel-first layout (CHW), matching the model input below
X_test, y_test = dataset.read_data(test_dir, IM_SIZE, order='CHW')
test_set = dataset.DataSet(X_test, y_test)

""" 2. Set test hyperparameters """
anchors = dataset.load_json(os.path.join(test_dir, 'anchors.json'))
class_map = dataset.load_json(os.path.join(test_dir, 'classes.json'))
nms_flag = True
hp_d = dict()
hp_d['batch_size'] = 16
hp_d['nms_flag'] = nms_flag

""" 3. Build graph, load weights, initialize a session and start test """
# Initialize: model expects CHW input [channels, height, width]
model = ConvNet([3, IM_SIZE[0], IM_SIZE[1]], NUM_CLASSES, anchors)
model.restore('./yolov2.pth')
model.cuda()
evaluator = Evaluator()
Code example #6
0
"""Training setup for a YOLO face detector: load anchors, split the
trainval set, and assemble the training hyperparameters."""
import os  # FIX: os.path.join is used below but `os` was never imported
from datasets import data as dataset  # FIX: `dataset` is used below but was never imported

# from learning.optimizers import MomentumOptimizer as Optimizer
from learning.optimizers import AdamOptimizer as Optimizer
from learning.evaluators import RecallEvaluator as Evaluator

""" 1. Load and split datasets """
root_dir = os.path.join('data/face/')  # FIXME
trainval_dir = os.path.join(root_dir, 'train')

# Load anchors
anchors = dataset.load_json(os.path.join(trainval_dir, 'anchors.json'))

# Set image size and number of classes
IM_SIZE = (416, 416)
NUM_CLASSES = 1

# Load trainval set (channel-first layout) and split into train/val sets:
# the first 10% of samples become the validation set.
X_trainval, y_trainval = dataset.read_data(trainval_dir, IM_SIZE, order='CHW')
trainval_size = X_trainval.shape[0]
val_size = int(trainval_size * 0.1)  # FIXME
val_set = dataset.DataSet(X_trainval[:val_size], y_trainval[:val_size])
train_set = dataset.DataSet(X_trainval[val_size:], y_trainval[val_size:])

""" 2. Set training hyperparameters"""
hp_d = dict()

# FIXME: Training hyperparameters
hp_d['batch_size'] = 2
hp_d['num_epochs'] = 50
hp_d['init_learning_rate'] = 1e-5
hp_d['learning_rate_patience'] = 10
hp_d['learning_rate_decay'] = 0.1
hp_d['score_threshold'] = 1e-4  # minimum confidence for a detection to count
hp_d['nms_flag'] = True