# `IMAGE_SIZE` is an (int, int) tuple of the image size of the first image. # We assume that each image is of the same size as the image labelled "0" config.IMAGE_SIZE = np.shape(imread(config.IMAGE_TEMPLATE.format(0))) # `IMAGES` holds the original digit images. config.IMAGES = [ cv2.cvtColor( cv2.resize(imread(config.IMAGE_TEMPLATE.format(digit)), dsize=(TRAINING_CONFIG.grid.render_shape)), cv2.COLOR_RGBA2RGB) for digit in range(10) ] # Initialise the checkpoint if config.CONDITION == 0: config.PROCESSOR = Stimulus else: LEARNING_RATE_ENCODER = TRAINING_CONFIG.learning_rate_encoder LEARNING_RATE_DECODER = TRAINING_CONFIG.learning_rate_decoder ENCODER_OPTIMISER = tf.keras.optimizers.Adam( learning_rate=LEARNING_RATE_ENCODER) DECODER_OPTIMISER = tf.keras.optimizers.Adam( learning_rate=LEARNING_RATE_DECODER) CHECKPOINT = tf.train.Checkpoint( encoder=TRAINING_CONFIG.encoder, decoder=TRAINING_CONFIG.decoder, encoder_optimiser=ENCODER_OPTIMISER, decoder_optimiser=DECODER_OPTIMISER, )
# Instantiate the grid: `GRID_TYPE` is either a key into the `grids`
# registry or, failing that, treated as a path to a pickled grid object.
try:
    grid = grids[config.GRID_TYPE]()
except KeyError:
    # NOTE(review): pickle.load can execute arbitrary code while
    # deserialising — only load grid files from a trusted source.
    with open(config.GRID_TYPE, 'rb') as infile:
        grid = pickle.load(infile)
config.GRID = grid

# We initiate the stimulus processor type.
processors = {
    'direct': Stimulus,
    'net': lambda image, grid: StimulusNet(image, grid, config.ENCODER),
}
config.PROCESSOR = processors[config.PROCESSOR_TYPE]

# Templates for data paths.
config.DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'
config.DIGIT_SOUND_TEMPLATE = './data/digit-voice/{}-alt.wav'

# Session output goes under `tests/` when testing and `participants/`
# otherwise; the three templates differ only in that sub-directory, so build
# them once from a single conditional instead of duplicating each string.
base_dir = './data/psychophysics-sessions/'
subdir = 'tests' if config.TESTING else 'participants'
config.CONFIG_FILE_TEMPLATE = base_dir + subdir + '/{}_{}_config.json'
config.SESSION_FILE_TEMPLATE = base_dir + subdir + '/{}_{}_session.csv'
config.MOUSE_FILE_TEMPLATE = base_dir + subdir + '/{}_{}_mouse.csv'