# Configuration script: builds a LoaderCon data loader for the CON dataset.
# NOTE(review): the source arrived collapsed onto a single physical line;
# line breaks below are reconstructed. All tokens are unchanged.
from utils import arr, TRAIN, VALID, invert_dict
import paths
from utils.plt import show_animate
from scripts.prep import *
from scripts.load import LoaderCon

n_frames = 32    # frames sampled per video clip
n_channels = 1   # single channel (see "video" tag below: no trailing channel axis)
output_shape = n_frames, 128, 128 #240, 320  -- original frame resolution, per inline note

l = LoaderCon(
    data_path=paths.CON_PREP2,
    inputs={
        # "target": arr((1,), "int32"),
        "target": arr((n_frames, ), "int32"),   # one int32 target per frame (was a single label — see commented-out line)
        # "video": arr(output_shape+(n_channels,), "float32"),
        "video": arr(output_shape, "float32"),  # (frames, h, w); channel axis dropped since n_channels == 1
        "path": arr((), "S128"),                # fixed-width (128-byte) string tag, presumably the source file path — confirm
    },
    preprocessors=[
        LabelsCon(),
        VideoLoadPrep(n_frames=n_frames, rand_middle_frame=True, rgb=True, depth=False,
                      use_bcolz=True, tolerance=0,
                      rgbbias=-127.5),  # rgbbias=-127.5 presumably centers uint8 pixel values around 0 — TODO confirm in VideoLoadPrep
        Augment3D(
            output_shape=output_shape,
            # NOTE(review): the Augment3D(...) / LoaderCon(...) calls continue
            # beyond this chunk — remainder not visible here.
import paths # INPUT DATA ############ batch_size = 24 batches_per_chunk = 1 chunk_size = batch_size*batches_per_chunk im_shp = 128, 128 n_channels = 2 n_frames = 32 voc_size = 249 # n_classes data_tags = OrderedDict( target=arr((n_frames,), "int32"), video=arr((n_frames,) + im_shp + (n_channels,), "float32"), ) if n_channels == 1: data_tags["video"] = arr((n_frames,) + im_shp, "float32") augm_params={ "translation": [0, 16, 16], "rotation": [8, 0, 0], "shear": [0, 0, 0], "scale": [1, 1.5, 1.5], "reflection": [0, 0, .5] # Bernoulli p } print "augm_params", augm_params data_loader = Loader(