Example #1
 def __init__(self) -> None:
     super().__init__()
     self._configs = Configs()
     self._pid = 0
     self._start = ""
     self._restart_count = 0
     self._in_error = False
Example #2
def initialize_configs(config_path):
    config_modules = []

    all_configs = config_names(config_path)

    if not all_configs:
        print("Error: no config files found in config path '{0}'".format(
            config_path),
              file=sys.stderr)
        sys.exit(1)

    config_helper = Configs(all_configs)
    config_helper.load_modules(config_modules)

    # Give at least one module the config helper
    config_modules[0].config_helper = config_helper

    # Step Four: Load jenni

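    # Prefer the in-tree package entry point; fall back to an installed jenni.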
    try:
        from __init__ import run
    except ImportError:
        try:
            from jenni import run
        except ImportError:
            print("Error: Couldn't find jenni to import", file=sys.stderr)
            sys.exit(1)

    # Step Five: Initialise And Run The jennies

    # @@ ignore SIGHUP
    for config_module in config_modules:
        run(config_module)  # @@ thread this
Example #3
 def __init__(self, device, mod, timeout=5000):
     self.timeout = timeout
     if isinstance(device, Device):
         self.device = device
     else:
         self.device = connect_device(device)
     self.logger = createlogger(mod)
     self.log_path = create_folder()
     self.config = GetConfigs("common")
     self.product = Configs("common").get("product", "Info")
     self.appconfig = AppConfig("appinfo", self.product)
     self.appconfig.set_section(mod)
     self.adb = self.device.server.adb
     self.suc_times = 0
     try:
         self.mod_cfg = GetConfigs(mod)
         self.test_times = 0
         self.dicttesttimes = self.mod_cfg.get_test_times()
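         # Weighted totals: Email counts every entry except 'opentimes' twice,
         # Message counts 'opentimes' four times, and other modules just sum
         # the raw counts.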
         if mod == "Email":
             for i in self.dicttesttimes:
                 self.test_times += int(self.dicttesttimes[i])
                 if i != 'opentimes':
                     self.test_times += int(self.dicttesttimes[i])
         elif mod == "Message":
             for i in self.dicttesttimes:
                 self.test_times += int(self.dicttesttimes[i])
                 if i == 'opentimes':
                     self.test_times += int(self.dicttesttimes[i]) * 3
         else:
             for test_time in self.dicttesttimes:
                 self.test_times += int(self.dicttesttimes[test_time])
         self.logger.info("Trace Total Times " + str(self.test_times))
     except Exception:
         # Leave the totals at their defaults if the module config is incomplete.
         pass
Example #4
    def setup(self, stage):

        self.configs = Configs()
        self.dataset = CustomDataset(self.configs)
        dataset_size = len(self.dataset)
        indices = list(range(dataset_size))
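        # Hold out the first `valSplit` fraction of the indices for validation;
        # the remainder is used for training.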
        split = int(np.floor(self.configs.valSplit * dataset_size))
        self.trainIndices, self.valIndices = indices[split:], indices[:split]
Example #5
    def __init__(self):
        from pymongo import MongoClient
        from configs import Configs
        import json
        configs = Configs()
        self.client = MongoClient(configs.get('databaseIP'),
                                  configs.get('databasePort'))
        self.db = self.client['DotaSeer']

        #self.matches = self.db['Matches'].create_index(('match_id'), unique = True)
        self.heroes = self.db['Heroes'].create_index('id')
Example #6
    def __init__(self):
        twitter_config = Configs().twitter_config
        access_token = twitter_config.access_token
        access_token_secret = twitter_config.access_token_secret
        api_key = twitter_config.api_key
        api_secret_key = twitter_config.api_secret_key

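        # OAuth 1.0a: authenticate the app with its consumer keys, then attach
        # the user's access token.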
        auth = tweepy.OAuthHandler(api_key, api_secret_key)
        auth.set_access_token(access_token, access_token_secret)

        self.client = tweepy.API(auth, wait_on_rate_limit=True)
Example #7
    def __init__(self, name):
        self.configs = Configs()
        self.BOARD_COLS = self.configs.BOARD_COLS
        self.BOARD_ROWS = self.configs.BOARD_ROWS

        self.name = name
        self.lr = self.configs.lr
        self.decay_gamma = self.configs.decay_gamma
        self.exp_rate = self.configs.exp_rate

        self.states = []
        self.states_val = {}
Example #8
    def __init__(self, p1, p2):
        self.configs = Configs()
        self.BOARD_COLS = self.configs.BOARD_COLS
        self.BOARD_ROWS = self.configs.BOARD_ROWS
        self.POLICIES_DIR = self.configs.POLICIES_DIR

        self.p1 = p1
        self.p2 = p2

        self.board = np.zeros((self.BOARD_COLS, self.BOARD_ROWS))
        self.boardHash = None
        self.isEnd = False
        self.playerSymbol = 1
Example #9
    def __init__(self):
        train_configs = Configs()
        self.workspace_limits = train_configs.WORKSPACE_LIMITS
        self.obj_mesh_dir = train_configs.OBJ_MESH_DIR
        self.texture_dir = train_configs.TEXTURE_DIR
        self.num_obj = train_configs.MAX_OBJ_NUM

        is_testing = False
        test_preset_cases = False
        test_preset_file = None

        # Initialize camera and robot
        self.robot = Robot(self.obj_mesh_dir, self.num_obj,
                           self.workspace_limits, is_testing,
                           test_preset_cases, test_preset_file)
Example #10
 def __init__(self, pretrained_net, n_class):
     super(FCN32s, self).__init__()
     self.n_class = n_class
     self.pretrained_net = pretrained_net
     self.configs = Configs()
     self.save_hyperparameters()
     self.relu = nn.ReLU(inplace=True)
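     # Five stride-2 transposed convolutions, each roughly doubling spatial
     # resolution, upsample the 1/32-scale backbone features back to input
     # size before the 1x1 classifier.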
     self.deconv1 = nn.ConvTranspose2d(512,
                                       512,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       dilation=1,
                                       output_padding=1)
     self.bn1 = nn.BatchNorm2d(512)
     self.deconv2 = nn.ConvTranspose2d(512,
                                       256,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       dilation=1,
                                       output_padding=1)
     self.bn2 = nn.BatchNorm2d(256)
     self.deconv3 = nn.ConvTranspose2d(256,
                                       128,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       dilation=1,
                                       output_padding=1)
     self.bn3 = nn.BatchNorm2d(128)
     self.deconv4 = nn.ConvTranspose2d(128,
                                       64,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       dilation=1,
                                       output_padding=1)
     self.bn4 = nn.BatchNorm2d(64)
     self.deconv5 = nn.ConvTranspose2d(64,
                                       32,
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       dilation=1,
                                       output_padding=1)
     self.bn5 = nn.BatchNorm2d(32)
     self.classifier = nn.Conv2d(32, n_class, kernel_size=1)
Example #11
def connect_device(device_name):
    """connect_device(device_id) -> Device    
    Connect a device according to device ID.
    """
    environ = os.environ
    device_id = environ.get(device_name)
    if device_id is None:
        device_id = device_name
    backend = Configs("common").get("backend", "Info")
    logger.debug("Device ID is " + device_id + " backend is " + backend)
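    # Resolve the user class from the backend name via globals(), e.g.
    # "Monkey" -> MonkeyUser (imported just above).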
    if backend.upper() == "MONKEY":
        from monkeyUser import MonkeyUser
        device = globals()["%sUser" % backend](device_id)
    else:
        device = Device(device_id)
    if device is None:
        logger.critical("Cannot connect device.")
        raise RuntimeError("Cannot connect %s device." % device_id)
    return device
Example #12
    def __init__(self, device, mod):
        self.product = Configs("common").get("product", "Info")
        self.device = connect_device(device)
        self.appconfig = AppConfig("appinfo")
        self.logger = createlogger(mod)
        self.camera = Camera(self.device, "media_camera")
        self.record = Recorder(self.device, "media_recorder")
        #self.browser = Browser(self.device,"media_browser")
        self.chrome = Chrome(self.device, "media_chrome")
        if self.product == "Sprints":
            self.music = PlayMusic(self.device, "media_music")
        else:
            self.music = Music(self.device, "media_music")
        self.suc_times = 0
        self.mod_cfg = GetConfigs(mod)
        self.test_times = 0
        self.dicttesttimes = self.mod_cfg.get_test_times()

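        # Video, recorder, and photo cases run three passes each, so they are
        # counted three times in the projected total.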
        for i in self.dicttesttimes:
            self.test_times += int(self.dicttesttimes[i])
            if i.upper() in ('VIDEOTIMES', 'RECORDER', 'PHOTOTIMES'):
                self.test_times += int(self.dicttesttimes[i]) * 2
        self.logger.info('Trace Total Times ' + str(self.test_times))
Example #13
import os
import sys
import time

from sklearn.model_selection import KFold  # assumed source of KFold below

from preprocess import dataframe_preprocess
from dataset import SegmentationDataset
from pytorch_lightning_module import ClassifyModel
from models import kaeru_classify_model
from configs import Configs
from utils import fix_seed

# sys.path.append(os.environ.get("TOGURO_LIB_PATH"))
# from slack import Slack
# from sheet import Sheet

start = time.time()

config = Configs()
fix_seed(config.SEED)

if __name__ == "__main__":
    df = dataframe_preprocess(os.path.join(config.input_path, "train.csv"))

    kf = KFold(n_splits=5, shuffle=True, random_state=config.SEED)
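    # Walk the five folds and keep the train/test indices for the fold
    # selected in the config.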
    for i, (train, test) in enumerate(kf.split(df)):
        if i == config.fold:
            train_loc, test_loc = train, test

    df_train = df.iloc[train_loc]
    df_valid = df.iloc[test_loc]

    train_dataset = SegmentationDataset(df_train,
                                        image_folder=os.path.join(
Example #14
    from keras.datasets import mnist, cifar10

    conf_parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
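    # Two-stage parsing: parse_known_args() extracts only --conf_file so the
    # config can be loaded first; the full parser below inherits this one as a
    # parent and handles the remaining options.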

    defaults = {}

    conf_parser.add_argument("-c",
                             "--conf_file",
                             help="Specify config file",
                             metavar="FILE_PATH")
    args, remaining_argv = conf_parser.parse_known_args()

    cfg = Configs(args.conf_file) if args.conf_file else Configs()

    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**defaults)
    parser.add_argument("--nb_epoch",
                        help="Number of epochs",
                        type=int,
                        metavar="INT")
    parser.add_argument("--red_only",
                        help="Use red only",
                        type=int,
                        metavar="INT")
    parser.add_argument("--block_size",
                        help="Size of each block for the VAE",
                        type=int,
                        metavar="INT")
Example #15
 def load_config(self, filename):
     with open(filename) as configFile:
         configDict = json.load(configFile)
     self.config = Configs(configDict)
     self.fitnesses = np.zeros(self.config.pop_size)
Example #16
import cv2
from imagedata import ImageData

import time

from configs import Configs

from detectors.retina import FaceDetector
from exec_backends.trt_backend import RetinaInfer as RetinaInferTRT
from exec_backends.onnxrt_backend import RetinaInfer as RetinaInferORT
'''
ATTENTION!!! This script is for testing purposes only. Work in progress.
'''

config = Configs(models_dir='/models')

iters = 100

#model_name = 'retinaface_mnet025_v2'
model_name = 'retinaface_r50_v1'
input_shape = [1024, 768]

# 'plan' for TensorRT or 'onnx' for ONNX
backend = 'plan'

retina_backends = {'onnx': RetinaInferORT, 'plan': RetinaInferTRT}

model_dir, model_path = config.build_model_paths(model_name, backend)

retina_backend = retina_backends[backend](rec_name=model_path)
Example #17
 def __init__(self):
     mongo_config = Configs().mongo_config
     self.connection_string = mongo_config.connection_string
     self.database = mongo_config.database
Example #18
def main():
    # global encode_length, vector_size

    ## 1. Load the intent dataset
    config = Configs()
    okt = Okt()

    question = preprocess_data(True)
    joinStr = ' '.join(question)

    morphs = okt.morphs(joinStr)
    joinString = ' '.join(morphs)
    pos1 = okt.pos(joinString)
    pos2 = ' '.join(list(map(lambda x: '\n' if x[1] in ['Punctuation'] else x[0], pos1))).split('\n')
    morphs = list(map(lambda x: okt.morphs(x), pos2))


    ## 2. Word embedding
    print("\n### Fasttext bulid model ###", end="\n")
    word2vec_model = FastText(size=config.vector_size, window=3, workers=8, min_count= 1)
    # word2vec_model = FastText(size=config.vector_size, window=2, workers=8, min_count= 1)
    word2vec_model.build_vocab(morphs)
    print('\n### Fasttext build complete ###', end="\n")

    print('\n### Fasttext train start ###', end="\n")
    word2vec_model.train(morphs, total_examples=word2vec_model.corpus_count, epochs=word2vec_model.epochs, compute_loss=True, verbose=1)
    print('\n### Fasttext train complete ###', end="\n")

    word2vec_model.save(config.fasttext_model_path+"intent_fasttextmodel")
    print('\n### Fasttext model save ###', end="\n")
    
    w2c_index = word2vec_model.wv.index2word  # list of words in the fastText vocabulary
    print("[DEBUG1-1]############ FastText representation ############", end="\n\n")
    print(w2c_index, end="\n\n\n")
    print('\n\n[DEBUG1-1] word_index word count >> ', len(w2c_index))  # <class 'list'>

    ### Save intentIndex
    with open(config.fasttext_model_path+'/intentIndex.pickle', 'wb') as f:
        pickle.dump(w2c_index, f, pickle.HIGHEST_PROTOCOL)

    print("_________________________________________________________________________________________________________________\n")



    # Build y_data
    y_data = config.df['intent']
    y_data = y_data.map(config.intent_mapping)
    y_data = to_categorical(y_data)

    
    # Build x_data
    # encode_length = 15
    x_data = []
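    # Pad or truncate each question to encode_length tokens ('#' as filler),
    # then map each token to its fastText vector (zeros for OOV tokens).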
    for q_raw in question:
        q_raw = okt.morphs(q_raw)  # split the sentence into morphemes (word units); str -> list
        q_raw = list(map(lambda x: q_raw[x] if x < len(q_raw) else '#', range(config.encode_length)))
        q_raw = list(map(lambda x: word2vec_model[x] if x in w2c_index else np.zeros(config.vector_size, dtype=float), q_raw))
        q_raw = np.array(q_raw)
        x_data.append(q_raw)
        
    x_data = np.array(x_data)   # (None, 15, 300)
    x_data = x_data.reshape(len(config.df), config.encode_length, config.vector_size, 1)
    print(x_data.shape)

    ## vector numpy array save
    # np.save("fasttext_vector.npy", x_data)
    print("_________________________________________________________________________________________________________________\n")



    ## 3. Build and train the model
    print("shape >>", x_data.shape, y_data.shape)   # (None, 15 ,300, 1) / (None, 5)

    model = Sequential()
    model.add(Conv2D(12, kernel_size=(2,2), input_shape=(config.encode_length, config.vector_size, 1), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(3,3), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(4,4), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    
    model.add(Conv2D(12, kernel_size=(2,2), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(3,3), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(4,4), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))

    model.add(Conv2D(12, kernel_size=(2,2), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(3,3), strides=(1,1), padding="valid", activation="relu", data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(4,4), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))

    model.add(Conv2D(12, kernel_size=(2,2), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(3,3), strides=(1,1), padding="valid", activation="relu", data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))
    model.add(Conv2D(12, kernel_size=(4,4), strides=(1,1), padding="valid", activation="relu"))
    model.add(MaxPooling2D(pool_size=(1,1), strides=(1,1)))

    model.add(Flatten())
    model.add(BatchNormalization())
    # model.add(Dropout(1.0))
    model.add(Dense(128, activation="relu"))
    # model.add(Dropout(0.1))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # stop = EarlyStopping(monitor="loss", patience=20, mode="auto")


    model.summary()
    
    model.fit(x_data, y_data, batch_size=64, epochs=500)
    # model.fit(x_data, y_data, batch_size=64, epochs=500, callbacks=[stop])
    print("_________________________________________________________________________________________________________________")
    loss, acc = model.evaluate(x_data, y_data)
    print("loss >> ", loss)
    print("acc >>", acc, end="\n")



    ## 4. Save the model
    path = config.intent_model_path

    new_num = 0
    if os.path.exists(path):    # the model directory already exists
        file_list = os.listdir(path)
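        # Version the saved files: find the highest existing "-NNN" suffix and
        # bump it by 100 for this run's model and weights.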
        for i in file_list:
            num = int(i.split(".")[0].split("-")[-1])

            if new_num <= num:
                new_num = num + 100

        
        model_name = "intent_model-"+str(new_num)+".h5"
        weights_name = "intent_weights-"+str(new_num)+".h5"
        print("\n\nFile name >>",model_name)
        model.save(path+model_name)
        model.save_weights(path+weights_name)
            
    else:
        model.save(path+"intent_model-100.h5")
        model.save_weights(path+"intent_weights-100.h5")

    print("\n#### MODEL SAVE ####", end='\n')
Example #19
 def __init__(self, api_base_url):
     from configs import Configs
     self.configs = Configs()
     self.api_base_url = self.configs.get(api_base_url)
Example #20
 def __init__(self):
     self.configs = Configs()
Example #21
                    help="Number of compilation threads for make")

parser.add_argument(
    "--config-file",
    dest="config_file",
    action="store",
    type=str,
    default='',
    required=True,
    help=
    "configuration file in json format specifying paths of prigams needed to compile CC3D"
)

args = parser.parse_args()
# -------------- end of parsing command line
CFG = Configs(json_fname=args.config_file)

MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, INSTALLER_BUILD = version_str_to_tuple(
    args.version)

version_str = version_tuple_to_str(version_component_sequence=(MAJOR_VERSION,
                                                               MINOR_VERSION,
                                                               BUILD_VERSION),
                                   number_of_version_components=3)

installer_version_str = version_tuple_to_str(
    version_component_sequence=(MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION,
                                INSTALLER_BUILD),
    number_of_version_components=3)

CURRENT_DIR = os.getcwd()
Example #22
def main_one_stop():
    # merge detection results into one .txt
    root_path = r'\\DATACHEWER\shareZ\2020_01_01_KateyCapture\Converted'
    work_path = r'D:\CalibrationData\CameraCalibration\2020_01_01_KateyCapture'
    if not os.path.exists(work_path):
        os.mkdir(work_path)

    configs = Configs()
    configs.num_cams = 16
    configs.frame_range = (0, 3060)
    configs.cam_range = (0, 15)

    Calibrator.detect_opencv_corners_multiprocess(root_path, work_path,
                                                  configs)

    # run only if a folder name has to be changed
    # Parser.change_corner_folder_name(root_path, 'Corners_20200102')

    # for visualizing detected corners
    # Calibrator.save_opencv_corners_on_images(root_path, work_path, configs)
    """
    detection results
    """
    Parser.merge_detection_results(root_path, work_path, configs)

    # """
    # outliers
    # """
    output_path = work_path + '\\CroppedCorners_2'
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    Calibrator.save_ranked_corner_crops(root_path, output_path, configs)
    # Calibrator.determine_corner_outliers(root_path, output_path)
    score_thres = 500
    Calibrator.generate_outliers_txt(root_path,
                                     work_path,
                                     score_thres,
                                     configs,
                                     export=True)
    """
    # initial camera parameters
    # """
    first_cam_setup = False
    if first_cam_setup:
        # if -1: obtain intrinsics for all cameras
        # if c >= 0: copy&paste intrinsic from given camera index, c.
        configs.num_stereo_imgs = 10
        configs.center_cam_idx = 0
        configs.center_img_name = '0500'
        configs.num_single_calib_imgs = 80
        standard_intrinsics_cam = 0
        Calibrator.compute_initial_camera_parameters(root_path, work_path,
                                                     standard_intrinsics_cam,
                                                     configs)
    else:
        from_path = r'D:\CalibrationData\CameraCalibration\191205_16Cams\SingleCalibrations'
        to_path = work_path + '\\SingleCalibrations'
        if not os.path.exists(to_path):
            os.mkdir(to_path)
        Calibrator.copy_intial_camera_parameters(from_path, to_path)
    """
    Bundle adjustment input
    """
    Generator.generate_bund_adj_input(root_path, work_path, configs)
    Generator.generate_bund_adj_initial_values_16cams(root_path, work_path,
                                                      configs)
    options = {'exclude_outliers': True, 'center_region': False}
    Generator.generate_chb_image_points_input(root_path, work_path, configs,
                                              options)
Example #23
def main_calibration(run):
    user_inputs = load_user_inputs('user_inputs.txt')

    configs = Configs()
    configs.num_cams = user_inputs['num_cams']

    # run = 2
    if run == 0:
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']
        Calibrator.detect_opencv_corners_multiprocess(root_path, work_path,
                                                      configs)
    elif run == 1:
        Calibrator.save_opencv_corners_on_images()
    elif run == 2:
        # merge detection results into one .txt
        # root_path = r'D:\Pictures\2019_12_03_capture'
        # output_path = r'D:\Pictures\2019_12_03_capture'
        root_path = user_inputs['root_path']
        output_path = root_path
        Parser.merge_detection_results(root_path, output_path, configs)
    elif run == 3:
        cam0 = input('Camera start index: ')
        cam1 = input('Camera end index: ')
        cam_range = (int(cam0), int(cam1))
        # root_path = input('Root path (e.g., r"D:/Pictures/2019_12_03_capture/Converted)": ')
        root_path = user_inputs['root_path']
        # Calibrator.save_ranked_corner_crops(cam_range, root_path)
        Calibrator.save_ranked_corner_crops(cam_range, root_path)
    elif run == 4:
        # root_path = input('Root path (e.g., r"D:/Pictures/2019_12_03_capture/Converted)": ')
        root_path = user_inputs['root_path']
        Calibrator.determine_corner_outliers(root_path)
    elif run == 5:
        root_path = user_inputs['root_path']
        export = input('\nSave outlier images as well? [y/n]: ')
        Calibrator.generate_outliers_txt(root_path, export, configs)
    elif run == 6:
        configs.num_stereo_imgs = user_inputs['num_stereo_imgs']
        configs.center_cam_idx = user_inputs['center_cam_idx']
        configs.center_img_name = user_inputs['center_img_name']
        configs.num_single_calib_imgs = user_inputs['num_single_calib_imgs']
        # root_path = input('Root path (e.g., r"D:/Pictures/2019_12_03_capture"): ')
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']

        # if -1: obtain intrinsics for all cameras
        # if c > 0: copy&paste intrinsic from given camera index, c.
        standard_intrinsics_cam = 0
        Calibrator.compute_initial_camera_parameters(root_path, work_path,
                                                     standard_intrinsics_cam,
                                                     configs)
    elif run == 7:
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']
        Calibrator.compute_initial_worldpoints_using_PnP(
            root_path, work_path, configs)
    elif run == 8:
        # root_path = input('Root path (e.g., r"D:/Pictures/2019_12_03_capture"): ')
        work_path = user_inputs['work_path']
        Renderer.render_camera_scene(work_path, configs)
    elif run == 9:
        root_path = user_inputs['root_path']
        Generator.generate_bund_adj_input(root_path, configs)
    elif run == 10:
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']
        Generator.generate_bund_adj_initial_values_16cams(
            root_path, work_path, configs)
    elif run == 11:
        options = {
            'exclude_outliers': True,
            'center_region': False,
            'frame_range': (0, 4400)
        }
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']
        Generator.generate_chb_image_points_input(root_path, work_path,
                                                  configs, options)
    elif run == 12:
        # after bundle adjustment
        root_path = user_inputs['root_path']
        work_path = user_inputs['work_path']
        Generator.generate_cam_params_from_bund_adj(root_path, work_path,
                                                    configs)
    elif run == 13:
        work_path = user_inputs['work_path']
        cam_param_path = work_path + r'\BundleAdjustment\output\bundle_adjustment_6dof\bundleadjustment_output.txt'
        Generator.export_cam_params_to_txt(work_path, configs, cam_param_path)
    elif run == 14:
        work_path = user_inputs['work_path']
        root_path = user_inputs['root_path']
        root_root_path = '\\'.join(root_path.split('\\')[0:-1])
        input_path = work_path + r'\\Triangulation\input\cam_params.json'
        output_path = root_root_path + r'\\CameraParameters'
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        # copy json to output_path
        out_shared = output_path + '\\cam_params.json'
        out_local = work_path + '\\FinalCamParams\\cam_params.json'
        shutil.copyfile(input_path, out_shared)
        shutil.copyfile(input_path, out_local)

        print('1. Saved to: {}'.format(out_local))
        print('2. Saved to: {}'.format(out_shared))
        output_path += '\\cam_params.txt'
        Generator.cam_params_json_to_txt(input_path, output_path, configs)
    else:
        print("[ERROR] invalid input integer! {}".format(run))
Example #24
import cv2
from PIL import Image
import time
import shutil
from vrep_env import ArmEnv
import random
from configs import Configs
import my_utils
from tensorboardX import SummaryWriter
from prioritized_memory import Memory
import os
import torch.nn as nn  # assumed import for the nn.Module subclass below
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# set hyperparameters
Train_Configs = Configs()

env = ArmEnv()

DIM_ACTIONS = Train_Configs.DIM_ACTIONS
DIM_STATES = Train_Configs.DIM_STATES
CHANNELS = Train_Configs.ANGLE_CHANNELS

#--------------------------------------
# Build network for q-value prediction
# Input: RGB-D, dim:[3,224,224]
# Output: [1,224,224]
#--------------------------------------
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
Example #25
def create_twitter_client(kafka_producer, configs):
    listener = StdOutListener(kafka_producer, configs.kafka_topic)
    auth = OAuthHandler(configs.consumer_key, configs.consumer_secret)
    auth.set_access_token(configs.access_token_key, configs.access_token_secret)

    return Stream(auth, listener)


def create_kafka_producer():
    # https://www.confluent.io/blog/introduction-to-apache-kafka-for-python-programmers/
    from confluent_kafka import Producer
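    # Durable-delivery settings: acks=all waits for every in-sync replica,
    # enable.idempotence deduplicates retried sends, and snappy compresses
    # message batches.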

    p = Producer({'bootstrap.servers': 'localhost:9092',
                  'acks': 'all',
                  'enable.idempotence': 'true',
                  'compression.type': 'snappy'})
    return p


configs = Configs()
producer = None
try:
    producer = create_kafka_producer()
    client = create_twitter_client(producer, configs)

    client.filter(track=configs.twitter_topics)

finally:
    exit_gracefully(producer)
Example #26
 def should_process(self, tweet):
     bot_config = Configs().bot_config
     return tweet.user.id_str in bot_config.follow_triggers and tweet.in_reply_to_user_id is None
Example #27
# Imports the Google Cloud client library
from google.cloud import storage
from configs import Configs

if __name__ == "__main__":
    configs = Configs(mode="train")

    # Instantiates a client
    client = storage.Client()
    bucket = client.get_bucket('dl-torch')
Example #28
import os
import shutil
from shutil import copyfile, rmtree, copytree

from configs import Configs

if __name__ == "__main__":
    configs = Configs(mode="export")
    params = configs.params
    working_dir = os.path.join("dist", params.path)
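    # Rebuild the export directory from scratch.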
    if os.path.exists(working_dir):
        rmtree(working_dir)
    os.makedirs(working_dir)

    src_files = [
        ("train.py", "train.py"),
        ("eval.py", "eval.py"),
        ("infer.py", "infer.py"),
        (os.path.join("models", params.model + ".py"), "model.py"),
        (os.path.join("datasets", params.dataset.name + ".py"), "dataset.py")
    ]
    for src, dst in src_files:
        copyfile(os.path.join("src", src), os.path.join(working_dir, dst))

    copytree(
        os.path.join("src", "utils"),
        os.path.join(working_dir, "utils"),
        ignore=shutil.ignore_patterns("__pycache__")
    )
Example #29
def train(argv=None):
    ''' train Block Gated PixelCNN model
    Usage:
        python block_cnn.py -c sample_train.cfg        : training example using configfile
        python block_cnn.py --option1 hoge ...         : train with command-line options
        python block_cnn.py -c test.cfg --opt1 hoge... : overwrite config options with command-line options
    '''

    ### parsing arguments from command-line or config-file ###
    if argv is None:
        argv = sys.argv

    conf_parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument("-c",
                             "--conf_file",
                             help="Specify config file",
                             metavar="FILE_PATH")
    args, remaining_argv = conf_parser.parse_known_args()
    defaults = {}

    # if args.conf_file:
    # config = configparser.SafeConfigParser()
    # config.read([args.conf_file])
    # defaults.update(dict(config.items("General")))

    cfg = Configs(args.conf_file) if args.conf_file else Configs()

    original_dim = cfg.original_dim
    intermediate_dim = cfg.intermediate_dim
    latent_dim = cfg.latent_dim
    block_size = cfg.block_size
    dataset = cfg.dataset
    red_only = cfg.red_only

    epsilon_std = cfg.epsilon_std

    block_cnn_weights = cfg.block_cnn_weights
    block_cnn_nb_epoch = cfg.block_cnn_nb_epoch
    block_cnn_batch_size = cfg.block_cnn_batch_size

    block_vae_weights = cfg.block_vae_weights

    block_cnn_outputs_dir = cfg.get_bcnn_out_path()
    block_vae_outputs_dir = cfg.get_bvae_out_path()

    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**defaults)
    parser.add_argument("--nb_epoch",
                        help="Number of epochs [Required]",
                        type=int,
                        metavar="INT")
    parser.add_argument("--nb_images",
                        help="Number of images to generate",
                        type=int,
                        metavar="INT")
    parser.add_argument("--batch_size",
                        help="Minibatch size",
                        type=int,
                        metavar="INT")
    parser.add_argument(
        "--conditional",
        help="model the conditional distribution p(x|h) (default:False)",
        type=str,
        metavar="BOOL")
    parser.add_argument(
        "--nb_pixelcnn_layers",
        help="Number of PixelCNN Layers (exept last two ReLu layers)",
        metavar="INT")
    parser.add_argument("--nb_filters",
                        help="Number of filters for each layer",
                        metavar="INT")
    parser.add_argument(
        "--filter_size_1st",
        help="Filter size for the first layer. (default: (7,7))",
        metavar="INT,INT")
    parser.add_argument(
        "--filter_size",
        help="Filter size for the subsequent layers. (default: (3,3))",
        metavar="INT,INT")
    parser.add_argument("--optimizer",
                        help="SGD optimizer (default: adadelta)",
                        type=str,
                        metavar="OPT_NAME")
    parser.add_argument("--es_patience",
                        help="Patience parameter for EarlyStopping",
                        type=int,
                        metavar="INT")
    parser.add_argument(
        "--save_root",
        help=
        "Root directory which trained files are saved (default: /tmp/pixelcnn)",
        type=str,
        metavar="DIR_PATH")
    parser.add_argument(
        "--timezone",
        help=
        "Trained files are saved in save_root/YYYYMMDDHHMMSS/ (default: Asia/Tokyo)",
        type=str,
        metavar="REGION_NAME")
    parser.add_argument(
        "--save_best_only",
        help="The latest best model will not be overwritten (default: False)",
        type=str,
        metavar="BOOL")

    args = parser.parse_args(remaining_argv)

    conditional = strtobool(args.conditional) if args.conditional else False

    ### load dataset ###
    if dataset == 'cifar10':
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        num_classes = 10
    elif dataset == 'mnist':
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        num_classes = 10
        # add dimension for channels
        x_train = np.expand_dims(x_train, axis=-1)
        x_test = np.expand_dims(x_test, axis=-1)
    if red_only:
        # select only red channel
        x_train = x_train[:, :, :, [0]]
        x_test = x_test[:, :, :, [0]]
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.

    h_train = keras.utils.to_categorical(y_train, num_classes)
    h_test = keras.utils.to_categorical(y_test, num_classes)

    imgs_train = x_train
    imgs_test = x_test

    # integer division: block counts must be ints for indexing and np.zeros
    num_blocks_y = x_train.shape[1] // block_size
    num_blocks_x = x_train.shape[2] // block_size
    num_blocks = num_blocks_y * num_blocks_x
    num_channels = imgs_train.shape[3]

    ### encoded block image size ###
    input_size = (num_blocks_y, num_blocks_x)

    utils = Utils()

    # run blocks through pre-trained encoder
    block_vae = BlockVAE(x_train.shape[1:4], block_size, intermediate_dim,
                         latent_dim)
    vae_model = block_vae.get_vae_model()
    encoder_model = block_vae.get_encoder_model()
    decoder_model = block_vae.get_decoder_model()
    vae_model.load_weights(block_vae_outputs_dir + block_vae_weights)

    ### build PixelCNN model ###
    model_params = {}
    model_params['input_size'] = input_size
    model_params['nb_channels'] = block_vae.latent_dim
    model_params['conditional'] = conditional
    if conditional:
        model_params['latent_dim'] = num_classes
    if args.nb_pixelcnn_layers:
        model_params['nb_pixelcnn_layers'] = int(args.nb_pixelcnn_layers)
    if args.nb_filters:
        model_params['nb_filters'] = int(args.nb_filters)
    if args.filter_size_1st:
        model_params['filter_size_1st'] = tuple(
            map(int, args.filter_size_1st.split(',')))
    if args.filter_size:
        model_params['filter_size'] = tuple(
            map(int, args.filter_size.split(',')))
    if args.optimizer:
        model_params['optimizer'] = args.optimizer
    if args.es_patience:
        model_params['es_patience'] = int(args.es_patience)
    if args.save_best_only:
        model_params['save_best_only'] = strtobool(args.save_best_only)

    save_root = args.save_root if args.save_root else '/tmp/pixelcnn_mnist'
    timezone = args.timezone if args.timezone else 'Asia/Tokyo'
    current_datetime = datetime.now(
        pytz.timezone(timezone)).strftime('%Y%m%d_%H%M%S')
    save_root = os.path.join(save_root, current_datetime)
    model_params['save_root'] = save_root

    if not os.path.exists(save_root):
        os.makedirs(save_root)

    pixelcnn = PixelCNN(**model_params)
    pixelcnn.build_model()

    if not os.path.exists(block_cnn_outputs_dir + block_cnn_weights):

        # NOTE: Now it is compulsory to add the nb_epoch and
        # batch_size variables in the configuration file as
        # 'block_cnn_nb_epoch' and 'block_cnn_batch_size' respectively
        nb_epoch = block_cnn_nb_epoch
        batch_size = block_cnn_batch_size

        # try:
        #     nb_epoch = int(args.nb_epoch)
        #     batch_size = int(args.batch_size)
        # except:
        #     sys.exit("Error: {--nb_epoch, --batch_size} must be specified.")

        pixelcnn.print_train_parameters(save_root)
        pixelcnn.export_train_parameters(save_root)
        with open(os.path.join(save_root, 'parameters.txt'), 'a') as txt_file:
            txt_file.write('########## other options ##########\n')
            txt_file.write('nb_epoch\t: %s\n' % nb_epoch)
            txt_file.write('batch_size\t: %s\n' % batch_size)
            txt_file.write('\n')

        pixelcnn.model.summary()

        # encode images using VAE
        print('Encoding blocks...')
        encoded_blocks_train = encoder_model.predict(x_train,
                                                     verbose=1,
                                                     batch_size=batch_size)[0]
        encoded_blocks_test = encoder_model.predict(x_test,
                                                    verbose=1,
                                                    batch_size=batch_size)[0]

        train_params = {}
        if conditional:
            train_params['x'] = [encoded_blocks_train, h_train]
            train_params['validation_data'] = ([encoded_blocks_test,
                                                h_test], encoded_blocks_test)
        else:
            train_params['x'] = encoded_blocks_train
            train_params['validation_data'] = (encoded_blocks_test,
                                               encoded_blocks_test)
        train_params['y'] = encoded_blocks_train
        train_params['nb_epoch'] = nb_epoch
        train_params['batch_size'] = batch_size
        train_params['shuffle'] = True

        start_time = time.time()
        pixelcnn.fit(**train_params)
        elapsed_time = time.time() - start_time

        print('------------------------------------------------')
        print('Elapsed time: ' + str(elapsed_time))

        pixelcnn.model.save_weights(block_cnn_outputs_dir + block_cnn_weights)
    else:
        pixelcnn.model.load_weights(block_cnn_outputs_dir + block_cnn_weights)

    ## prepare zeros array
    nb_images = int(args.nb_images) if args.nb_images else 8
    batch_size = int(args.batch_size) if args.batch_size else nb_images
    #X_pred = np.zeros((nb_images, input_size[0], input_size[1], block_vae.latent_dim))

    # encode training images using BlockVAE
    X_pred = encoder_model.predict(x_train[0:nb_images])[0]
    # generate encode bottom half of images block by block
    X_pred = generate_bottom_half(X_pred, h_train[0:nb_images], conditional,
                                  block_vae, pixelcnn)
    # decode encoded images
    decode_images_and_predict(X_pred, block_vae, decoder_model, cfg, 'train')

    # encode testing images using BlockVAE
    X_pred = encoder_model.predict(x_test[0:nb_images])[0]
    # generate encode bottom half of images block by block
    X_pred = generate_bottom_half(X_pred, h_test[0:nb_images], conditional,
                                  block_vae, pixelcnn)
    # decode encoded images
    decode_images_and_predict(X_pred, block_vae, decoder_model, cfg, 'test')

    # randomly sample images
    X_pred = np.zeros(
        (num_classes, num_blocks_y, num_blocks_x, block_vae.latent_dim))
    h_pred = np.arange(num_classes)
    h_pred = keras.utils.to_categorical(h_pred, num_classes)

    ### generate encoded images block by block
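    # Autoregressive sampling: predict the full grid, keep only block (i, j),
    # perturb it with small Gaussian noise, and feed the grid back in.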
    for i in range(input_size[0]):
        for j in range(input_size[1]):
            if conditional:
                x = [X_pred, h_pred]
            else:
                x = X_pred

            next_X_pred = pixelcnn.model.predict(x, batch_size)
            samp = next_X_pred[:, i, j, :]
            #X_pred[:,i,j,:] = samp
            noise = np.random.randn(num_classes, block_vae.latent_dim)
            X_pred[:, i, j, :] = samp + noise * 0.1

    # decode encoded images
    decode_images_and_predict(X_pred, block_vae, decoder_model, cfg, 'sampled')
Example #30
def train(argv=None):
    ''' train Block Gated PixelCNN model
    Usage:
        python block_cnn.py -c sample_train.cfg        : training example using configfile
        python block_cnn.py --option1 hoge ...         : train with command-line options
        python block_cnn.py -c test.cfg --opt1 hoge... : overwrite config options with command-line options
    '''

    ### parsing arguments from command-line or config-file ###
    if argv is None:
        argv = sys.argv

    conf_parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False)
    conf_parser.add_argument("-c",
                             "--conf_file",
                             help="Specify config file",
                             metavar="FILE_PATH")
    args, remaining_argv = conf_parser.parse_known_args()
    defaults = {}

    # if args.conf_file:
    # config = configparser.SafeConfigParser()
    # config.read([args.conf_file])
    # defaults.update(dict(config.items("General")))

    cfg = Configs(args.conf_file) if args.conf_file else Configs()

    original_dim = cfg.original_dim
    intermediate_dim = cfg.intermediate_dim
    latent_dim = cfg.latent_dim
    num_layers = cfg.num_layers
    loss_type = cfg.vae_loss_type
    block_size = cfg.block_size
    dataset = cfg.dataset
    red_only = cfg.red_only

    epsilon_std = cfg.epsilon_std

    gated = cfg.gated

    block_cnn_weights = cfg.block_cnn_weights
    block_cnn_nb_epoch = cfg.block_cnn_nb_epoch
    block_cnn_batch_size = cfg.block_cnn_batch_size

    block_vae_weights = cfg.block_vae_weights

    block_cnn_outputs_dir = cfg.get_bcnn_out_path()
    block_vae_outputs_dir = cfg.get_bvae_out_path()

    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**defaults)
    parser.add_argument("--nb_epoch",
                        help="Number of epochs [Required]",
                        type=int,
                        metavar="INT")
    parser.add_argument("--nb_images",
                        help="Number of images to generate",
                        type=int,
                        metavar="INT")
    parser.add_argument("--batch_size",
                        help="Minibatch size",
                        type=int,
                        metavar="INT")
    parser.add_argument(
        "--conditional",
        help="model the conditional distribution p(x|h) (default:False)",
        type=str,
        metavar="BOOL")
    parser.add_argument(
        "--nb_pixelcnn_layers",
        help="Number of PixelCNN Layers (exept last two ReLu layers)",
        metavar="INT")
    parser.add_argument("--nb_filters",
                        help="Number of filters for each layer",
                        metavar="INT")
    parser.add_argument(
        "--filter_size_1st",
        help="Filter size for the first layer. (default: (7,7))",
        metavar="INT,INT")
    parser.add_argument(
        "--filter_size",
        help="Filter size for the subsequent layers. (default: (3,3))",
        metavar="INT,INT")
    parser.add_argument("--optimizer",
                        help="SGD optimizer (default: adadelta)",
                        type=str,
                        metavar="OPT_NAME")
    parser.add_argument("--es_patience",
                        help="Patience parameter for EarlyStopping",
                        type=int,
                        metavar="INT")
    parser.add_argument(
        "--save_root",
        help=
        "Root directory which trained files are saved (default: /tmp/pixelcnn)",
        type=str,
        metavar="DIR_PATH")
    parser.add_argument(
        "--timezone",
        help=
        "Trained files are saved in save_root/YYYYMMDDHHMMSS/ (default: Asia/Tokyo)",
        type=str,
        metavar="REGION_NAME")
    parser.add_argument(
        "--save_best_only",
        help="The latest best model will not be overwritten (default: False)",
        type=str,
        metavar="BOOL")
    parser.add_argument(
        "--calc_nll",
        help="Calculate the average NLL of the images (default: False)",
        type=int,
        metavar="INT")

    args = parser.parse_args(remaining_argv)

    conditional = strtobool(args.conditional) if args.conditional else False

    print "------------------------------"
    print "Dataset: ", dataset
    print "Block size: ", block_size
    print "Original dim: ", original_dim
    print "Latent dim ", latent_dim
    print "------------------------------"

    ### load dataset ###
    if dataset == 'cifar10':
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        num_classes = 10

        # select only frogs
        num_classes = 1
        x_train = x_train[(y_train == 6).flatten()]
        y_train = y_train[(y_train == 6).flatten()]
        x_test = x_test[(y_test == 6).flatten()]
        y_test = y_test[(y_test == 6).flatten()]
    elif dataset == 'mnist':
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        num_classes = 10
        # add dimension for channels
        x_train = np.expand_dims(x_train, axis=-1)
        # x_train = x_train[:10000]
        x_test = np.expand_dims(x_test, axis=-1)
        # x_test = x_test[:1500]
    elif dataset == 'lfw':
        f = h5py.File('lfw.hdf5', 'r')
        x = f['data'][:]
        y = f['label'][:]
        f.close()
        num_images = len(x)
        num_train = int(num_images * 8 / 10)
        shuffled_inds = np.random.permutation(num_images)
        train_inds = shuffled_inds[:num_train]
        test_inds = shuffled_inds[num_train:]
        x_train = x[train_inds]
        y_train = y[train_inds]
        x_test = x[test_inds]
        y_test = y[test_inds]
        num_classes = np.max(y[:]) + 1
    if red_only:
        # select only red channel
        x_train = x_train[:, :, :, [0]]
        x_test = x_test[:, :, :, [0]]
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255.
    x_test /= 255.

    if num_classes > 1:
        h_train = keras.utils.to_categorical(y_train, num_classes)
        h_test = keras.utils.to_categorical(y_test, num_classes)
    else:
        h_train = np.copy(y_train)
        h_test = np.copy(y_test)

    imgs_train = x_train
    imgs_test = x_test

    # integer division: block counts must be ints for indexing and np.zeros
    num_blocks_y = x_train.shape[1] // block_size
    num_blocks_x = x_train.shape[2] // block_size
    num_blocks = num_blocks_y * num_blocks_x
    num_channels = imgs_train.shape[3]

    ### encoded block image size ###
    input_size = (num_blocks_y, num_blocks_x)

    utils = Utils()

    # run blocks through pre-trained encoder
    block_vae = BlockVAE(original_dim, intermediate_dim, latent_dim,
                         num_layers, loss_type, epsilon_std)
    vae_model = block_vae.get_vae_model()
    encoder_model = block_vae.get_encoder_model()
    decoder_model = block_vae.get_decoder_model()
    vae_model.load_weights(block_vae_outputs_dir + block_vae_weights)

    ### build PixelCNN model ###
    model_params = {}
    model_params['input_size'] = input_size
    model_params['nb_channels'] = block_vae.latent_dim
    model_params['conditional'] = conditional
    if conditional:
        model_params['latent_dim'] = num_classes
    if gated:
        model_params['gated'] = strtobool(gated)
    if args.nb_pixelcnn_layers:
        model_params['nb_pixelcnn_layers'] = int(args.nb_pixelcnn_layers)
    if args.nb_filters:
        model_params['nb_filters'] = int(args.nb_filters)
    if args.filter_size_1st:
        model_params['filter_size_1st'] = tuple(
            map(int, args.filter_size_1st.split(',')))
    if args.filter_size:
        model_params['filter_size'] = tuple(
            map(int, args.filter_size.split(',')))
    if args.optimizer:
        model_params['optimizer'] = args.optimizer
    if args.es_patience:
        model_params['es_patience'] = int(args.es_patience)
    if args.save_best_only:
        model_params['save_best_only'] = strtobool(args.save_best_only)

    save_root = args.save_root if args.save_root else '/tmp/pixelcnn_mnist'
    timezone = args.timezone if args.timezone else 'Asia/Tokyo'
    current_datetime = datetime.now(
        pytz.timezone(timezone)).strftime('%Y%m%d_%H%M%S')
    save_root = os.path.join(save_root, current_datetime)
    model_params['save_root'] = save_root

    if not os.path.exists(save_root):
        os.makedirs(save_root)

    pixelcnn = PixelCNN(**model_params)
    pixelcnn.build_model()

    if not os.path.exists(block_cnn_outputs_dir + block_cnn_weights):

        # NOTE: Now it is compulsory to add the nb_epoch and
        # batch_size variables in the configuration file as
        # 'block_cnn_nb_epoch' and 'block_cnn_batch_size' respectively
        nb_epoch = block_cnn_nb_epoch
        batch_size = block_cnn_batch_size

        # try:
        #     nb_epoch = int(args.nb_epoch)
        #     batch_size = int(args.batch_size)
        # except:
        #     sys.exit("Error: {--nb_epoch, --batch_size} must be specified.")

        pixelcnn.print_train_parameters(save_root)
        pixelcnn.export_train_parameters(save_root)
        with open(os.path.join(save_root, 'parameters.txt'), 'a') as txt_file:
            txt_file.write('########## other options ##########\n')
            txt_file.write('nb_epoch\t: %s\n' % nb_epoch)
            txt_file.write('batch_size\t: %s\n' % batch_size)
            txt_file.write('\n')

        pixelcnn.model.summary()

        # get image blocks for all images
        blocks_train = np.zeros((len(x_train) * num_blocks_y * num_blocks_x,
                                 block_size * block_size * num_channels),
                                dtype='float32')
        blocks_test = np.zeros((len(x_test) * num_blocks_y * num_blocks_x,
                                block_size * block_size * num_channels),
                               dtype='float32')
        for i in trange(len(x_train), desc='getting training image blocks'):
            blocks_train[i * num_blocks:(i + 1) *
                         num_blocks] = image_to_blocks(x_train[i], block_size)
        for i in trange(len(x_test), desc='getting testing image blocks'):
            blocks_test[i * num_blocks:(i + 1) * num_blocks] = image_to_blocks(
                x_test[i], block_size)

        # encode blocks using VAE
        print('Encoding blocks...')
        results = encoder_model.predict(blocks_train,
                                        verbose=1,
                                        batch_size=batch_size)[0]
        encoded_blocks_train = np.zeros(
            (len(x_train), num_blocks_y, num_blocks_x, block_vae.latent_dim))
        for i in range(len(x_train)):
            encoded_blocks_train[i] = results[i * num_blocks:(i + 1) *
                                              num_blocks].reshape(
                                                  num_blocks_y, num_blocks_x,
                                                  -1)

        results = encoder_model.predict(blocks_test,
                                        verbose=1,
                                        batch_size=batch_size)[0]
        encoded_blocks_test = np.zeros(
            (len(x_test), num_blocks_y, num_blocks_x, block_vae.latent_dim))
        h_test = np.zeros((len(x_test), num_classes))
        for i in range(len(x_test)):
            encoded_blocks_test[i] = results[i * num_blocks:(i + 1) *
                                             num_blocks].reshape(
                                                 num_blocks_y, num_blocks_x,
                                                 -1)

        train_params = {}
        if conditional:
            train_params['x'] = [encoded_blocks_train, h_train]
            train_params['validation_data'] = ([encoded_blocks_test,
                                                h_test], encoded_blocks_test)
        else:
            train_params['x'] = encoded_blocks_train
            train_params['validation_data'] = (encoded_blocks_test,
                                               encoded_blocks_test)
        train_params['y'] = encoded_blocks_train
        train_params['nb_epoch'] = nb_epoch
        train_params['batch_size'] = batch_size
        train_params['shuffle'] = True

        start_time = time.time()
        pixelcnn.fit(**train_params)
        elapsed_time = time.time() - start_time

        print('------------------------------------------------')
        print('Elapsed time in BlockCNN: ' + str(elapsed_time))

        pixelcnn.model.save_weights(block_cnn_outputs_dir + block_cnn_weights)
    else:
        pixelcnn.model.load_weights(block_cnn_outputs_dir + block_cnn_weights)

    ## prepare zeros array
    nb_images = int(args.nb_images) if args.nb_images else 8
    batch_size = int(args.batch_size) if args.batch_size else nb_images
    #X_pred = np.zeros((nb_images, input_size[0], input_size[1], block_vae.latent_dim))

    # get image blocks for all images
    blocks_train = get_images_to_blocks(x_train, nb_images, block_size)
    blocks_test = get_images_to_blocks(x_test, nb_images, block_size)

    # write out ground truth images
    # for i in range(nb_images):
    #     imsave(block_cnn_outputs_dir + 'train_image_%02d.png' % i, x_train[i])
    #     imsave(block_cnn_outputs_dir + 'test_image_%02d.png' % i, x_test[i])

    for i in range(10):
        # encode training images using BlockVAE
        X_pred = encode_blocks_with_VAE(blocks_train, encoder_model, nb_images,
                                        block_vae, input_size)
        # generate encode bottom half of images block by block
        X_pred = generate_bottom_half(X_pred, h_train[0:nb_images],
                                      conditional, block_vae, pixelcnn)
        # decode encoded images
        elapsed_time_img1 = decode_images_and_predict(X_pred, block_vae,
                                                      decoder_model, cfg,
                                                      'train%d' % i)

    for i in range(10):
        # encode testing images using BlockVAE
        X_pred = encode_blocks_with_VAE(blocks_test, encoder_model, nb_images,
                                        block_vae, input_size)
        # generate encode bottom half of images block by block
        X_pred = generate_bottom_half(X_pred, h_test[0:nb_images], conditional,
                                      block_vae, pixelcnn)
        # decode encoded images
        elapsed_time_img2 = decode_images_and_predict(X_pred, block_vae,
                                                      decoder_model, cfg,
                                                      'test%d' % i)

    start_time_sampled_overhead = time.time()
    # randomly sample images
    X_pred = np.zeros(
        (num_classes, num_blocks_y, num_blocks_x, block_vae.latent_dim))
    h_pred = np.arange(num_classes)
    h_pred = keras.utils.to_categorical(h_pred, num_classes)

    ### generate encoded images block by block
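    # As in Example #29, sample the grid autoregressively block by block,
    # here with clipped noise for more conservative samples.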
    for i in range(input_size[0]):
        for j in range(input_size[1]):
            if conditional:
                x = [X_pred, h_pred]
            else:
                x = X_pred

            next_X_pred = pixelcnn.model.predict(x, batch_size)
            samp = next_X_pred[:, i, j, :]
            #X_pred[:,i,j,:] = samp
            noise = np.random.randn(num_classes, block_vae.latent_dim)
            noise = np.clip(noise * 0.01, -0.02, 0.02)
            X_pred[:, i, j, :] = samp + noise

    elapsed_time_sampled_overhead = time.time() - start_time_sampled_overhead
    # decode encoded images
    elapsed_time_sampled = decode_images_and_predict(X_pred, block_vae,
                                                     decoder_model, cfg,
                                                     'sampled')
    elapsed_time_sampled += elapsed_time_sampled_overhead

    print "---------------------------------------------"
    print "BlockCNN generation times"
    print "---------------------------------------------"
    print "Elapsed time image 1: ", elapsed_time_img1
    print "Elapsed time image 2: ", elapsed_time_img2
    print "Elapsed time sampled: ", elapsed_time_sampled
    print "---------------------------------------------"

    if args.calc_nll:
        calc_avg_nll(conditional, h_test, encoder_model, block_vae, input_size,
                     pixelcnn, imgs_test, block_size, dataset)