def _validate_hit_on_go(self, trial, outcome, correct_side):
    """Check the state-machine record of a hit on a go trial.

    A hit requires that the reward state for the correct side fired
    (entered and exited, hence two timestamps), that the subject poked
    at least once into the correct port, and that the hit_on_go
    indicator state fired.
    """
    reward_state = trial.states.__dict__[correct_side + '_reward']
    poke_port = trial.pokes.__dict__[correct_side[0].upper()]

    assert reward_state.size == 2
    assert poke_port.size > 0
    assert trial.states.hit_on_go.size == 2
def __init__(self, logfile, instance, namespace, location, remote_conn_details):
    """Open an SSH session to a remote MUMPS host and wrap it in an
    interactive shell whose read side is logged to ``logfile``.

    ``instance`` is stored lower-cased as the connection type, and
    ``namespace`` upper-cased; the expected shell prompt is the
    namespace followed by ``>``.
    NOTE(review): ``location`` is accepted but never used here — confirm
    whether a subclass or caller relies on it.
    """
    super(ConnectMUMPS, self).__init__()

    self.type = instance.lower()
    self.namespace = namespace.upper()
    self.prompt = self.namespace + '>'

    # Build the SSH transport, auto-accepting unknown host keys.
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=remote_conn_details.remote_address,
                port=remote_conn_details.remote_port,
                username=remote_conn_details.username,
                password=remote_conn_details.password)

    # Interaction layer that drives the remote shell over the transport.
    from paramikoe import SSHClientInteraction
    shell = SSHClientInteraction(ssh, timeout=10, display=False)
    shell.logfile_read = open(logfile, 'w')

    self.connection = shell
    # Keep the client alive; paramiko disconnects (sends a FIN) when the
    # client object is garbage collected.
    self.client = ssh
def _validate_error_on_nogo(self, trial, outcome, correct_side):
    """Check the state-machine record of an error on a nogo trial.

    An error on nogo means no reward was delivered on either side, the
    subject nonetheless entered the correct port at least once, and the
    error_on_nogo indicator state fired (two timestamps: entry/exit).
    """
    poke_port = trial.pokes.__dict__[correct_side[0].upper()]

    assert trial.states.left_reward.size == 0
    assert trial.states.right_reward.size == 0
    assert poke_port.size > 0
    assert trial.states.error_on_nogo.size == 2
def getXYT(xyt_filename, match_only=False, rebuild=False, filepath=None):
    """Read the output of the RHT from a .npz or .fits file.

    :param xyt_filename: Path to the RHT output (.npz or .fits).
    :param match_only: If a dict of header keys is given, return True
        when ALL keys match the file's stored values, else False.
    :param rebuild: If True (and ``filepath`` is given), reconstruct the
        full 3-D (y, x, theta) array from the sparse representation.
        Previously this branch was dead code (``rebuild`` was hardcoded
        to None and ``filepath`` undefined); both are now parameters,
        defaulting to the old sparse behavior.
    :param filepath: Path to the original image used to size the
        reconstructed array (only used when ``rebuild`` is True).
    :returns: bool when ``match_only`` is given; otherwise either the
        reconstructed 3-D array, or the sparse form as three arrays
        (Hi, Hj, Hthets) of coordinates and theta power.
    :raises ValueError: If the file is missing (and ``match_only`` is
        falsy) or has an unsupported extension.
    """
    if not os.path.isfile(xyt_filename):
        # Fast-failure case: this file does not exist.
        if match_only:
            return False
        raise ValueError(
            'Input xyt_filename in getXYT matches no existing file')

    if xyt_filename.endswith('.npz'):
        # mmap_mode allows very large files to be read in.
        data = np.load(xyt_filename, mmap_mode='r')
        if match_only:
            try:
                return all([
                    match_only[x] == data[str.lower(x)]
                    for x in list(match_only.keys())
                ])
            except KeyError:
                return False
        Hi = data['hi']
        Hj = data['hj']
        Hthets = data['hthets']
    elif xyt_filename.endswith('.fits'):
        # memmap allows for reading in very large files.
        hdu_list = fits.open(
            xyt_filename,
            mode='readonly',
            memmap=True,
            save_backup=False,
            checksum=True)
        header = hdu_list[0].header
        if match_only:
            try:
                return all([
                    match_only[x] == header[str.upper(x)]
                    for x in list(match_only.keys())
                ])
            except KeyError:
                return False
        data = hdu_list[1].data
        Hi = data['hi']
        Hj = data['hj']
        Hthets = data['hthets']
    else:
        raise ValueError(
            'Supported input types in getXYT include .npz and .fits only')

    if rebuild and filepath is not None:
        # Recreate the entire 3-D array (mostly zeros) from the sparse form.
        data = getData(filepath)
        datay, datax = data.shape
        # Bug fix: was ``Hthets[0].shape`` (a tuple), which would have
        # crashed the shape arguments below.
        ntheta = len(Hthets[0])
        # NOTE(review): BUFFER, DTYPE and getData are assumed to be
        # module-level globals — confirm against the full module.
        if BUFFER:
            xyt = np.memmap(tempfile.TemporaryFile(), dtype=DTYPE,
                            mode='w+', shape=(datay, datax, ntheta))
            xyt.fill(0.0)
        else:
            print(
                'Warning: Reconstructing very large array in memory! Set BUFFER to True!'
            )
            xyt = np.zeros((datay, datax, ntheta))
        coords = list(zip(Hj, Hi))
        for c in range(len(coords)):
            j, i = coords[c]
            xyt[j, i, :] = Hthets[c]
        return xyt

    # Returns the sparse, memory-mapped form only.
    return Hi, Hj, Hthets
def validate(self):
    """Runs validation checks on the loaded data.

    There are unlimited consistency checks we could do, but only a few
    easy checks are implemented. The most problematic error would be
    inconsistent data in TRIALS_INFO, for example if the rows were
    written with the wrong trial number or something. That's the primary
    thing that is checked.

    It is assumed that we can trust the state machine states. So, the
    pokes are not explicitly checked to ensure the exact timing of
    behavioral events. This would be a good feature to add though.
    Instead, the indicator states are matched to TRIALS_INFO. When easy,
    I check that at least one poke in the right port occurred, but I
    don't check that it actually occurred in the window of opportunity.

    No block information is checked. This is usually pretty obvious if
    it's wrong.

    If there is a known problem on certain trials, set
    self.skip_trial_set to a list of trials to skip. Rows of TRIALS_INFO
    for which TRIAL_NUMBER matches a member of this set will be skipped
    (not validated).

    Checks:
    1)  Does the *_istate outcome match the TRIALS_INFO outcome
    2)  For each possible trial outcome, the correct port must have been
        entered (or not entered).
    3)  The stim number in TRIALS_INFO should match the other
        TRIALS_INFO characteristics in accordance with SOUNDS_INFO.
    4)  Every trial in peh should be in TRIALS_INFO, all others should
        be FUTURE_TRIAL.
    """
    # Shortcut references to save typing
    CONSTS = self.data['CONSTS']
    TRIALS_INFO = self.data['TRIALS_INFO']
    SOUNDS_INFO = self.data['SOUNDS_INFO']
    peh = self.data['peh']
    datasink = self.data['datasink']

    # Some inverse maps for looking up data in TRIALS_INFO:
    # CONSTS maps e.g. 'HIT' -> code; these map code -> lowercase name.
    outcome_map = dict((CONSTS[str.upper(s)], s) for s in \
        ('hit', 'error', 'wrong_port'))
    left_right_map = dict((CONSTS[str.upper(s)], s) for s in \
        ('left', 'right'))
    go_or_nogo_map = dict((CONSTS[str.upper(s)], s) for s in \
        ('go', 'nogo'))

    # Go through peh and for each trial, match data to TRIALS_INFO.
    # Also match to datasink. Note that datasink is a snapshot taken
    # immediately before the next trial state machine was uploaded.
    # So it contains some information about previous trial and some
    # about next. It is also always length 1 more than peh.
    for n, trial in enumerate(peh):
        # Skip trials the caller flagged as known-bad.
        if TRIALS_INFO['TRIAL_NUMBER'][n] in self.skip_trial_set:
            continue

        # Extract info from the current row of TRIALS_INFO, converting
        # stored integer codes to their lowercase string names.
        outcome = outcome_map[TRIALS_INFO['OUTCOME'][n]]
        correct_side = left_right_map[TRIALS_INFO['CORRECT_SIDE'][n]]
        go_or_nogo = go_or_nogo_map[TRIALS_INFO['GO_OR_NOGO'][n]]

        # Note that we correct for 1- and 0- indexing into SOUNDS_INFO here
        stim_number = TRIALS_INFO['STIM_NUMBER'][n] - 1

        # TRIALS_INFO is internally consistent with sound parameters
        assert(TRIALS_INFO['CORRECT_SIDE'][n] == \
            SOUNDS_INFO['correct_side'][stim_number])
        assert(TRIALS_INFO['GO_OR_NOGO'][n] == \
            SOUNDS_INFO['go_or_nogo'][stim_number])

        # If possible, check datasink (v2 behavior files have no sink).
        if not self.v2_behavior:
            # Check that datasink is consistent with TRIALS_INFO.
            # First load the n and n+1 sinks, since the info is split
            # across them. The funny .item() syntax is because loading
            # Matlab structs sometimes produces 0d arrays.
            # This little segment of code is the only place where the
            # datasink is checked.
            prev_sink = datasink[n]
            next_sink = datasink[n + 1]
            try:
                assert(prev_sink.next_sound_id.stimulus.item() == \
                    TRIALS_INFO['STIM_NUMBER'][n])
                assert(prev_sink.next_side.item() == \
                    TRIALS_INFO['CORRECT_SIDE'][n])
                assert(prev_sink.next_trial_type.item() == \
                    TRIALS_INFO['GO_OR_NOGO'][n])
                assert(next_sink.finished_trial_num.item() == \
                    TRIALS_INFO['TRIAL_NUMBER'][n])
                assert(CONSTS[next_sink.finished_trial_outcome.item()] == \
                    TRIALS_INFO['OUTCOME'][n])
            except AttributeError:
                # .item() syntax only required for some versions of scipy
                assert(prev_sink.next_sound_id.stimulus == \
                    TRIALS_INFO['STIM_NUMBER'][n])
                assert(prev_sink.next_side == \
                    TRIALS_INFO['CORRECT_SIDE'][n])
                assert(prev_sink.next_trial_type == \
                    TRIALS_INFO['GO_OR_NOGO'][n])
                assert(next_sink.finished_trial_num == \
                    TRIALS_INFO['TRIAL_NUMBER'][n])
                assert(CONSTS[next_sink.finished_trial_outcome] == \
                    TRIALS_INFO['OUTCOME'][n])
            # Sound name is correct
            # assert(SOUNDS_INFO.sound_names[stim_number] == datasink[sound name]

        # Validate trial (dispatches on outcome/side/go-nogo).
        self._validate_trial(trial, outcome, correct_side, go_or_nogo)

    # All future trials should be marked as such.
    # Under certain circumstances, TRIALS_INFO can contain information
    # about one more trial than peh. I think this is if the protocol
    # is turned off before the end of the trial.
    try:
        assert(np.all(TRIALS_INFO['OUTCOME'][len(peh):] == \
            CONSTS['FUTURE_TRIAL']))
    except AssertionError:
        print("warn: at least one more trial in TRIALS_INFO than peh.")
        print("checking that it is no more than one ...")
        assert(np.all(TRIALS_INFO['OUTCOME'][len(peh)+1:] == \
            CONSTS['FUTURE_TRIAL']))
def common_args(parser, function_name,
                valid_functions=['gaperture', 'gmap', 'gfind']):
    """
    Defines the arguments and options for the parser object when called from
    the command line.  Accepts a string used to determine which arguments to
    add to the Parser object. Valid function names are "gfind", "gaperture",
    or "gmap" (all case insensitive).

    :param parser: Command-line options to check and modify.

    :type parser: argparse.ArgumentParser Namespace

    :param function_name: Name of the function being called.

    :type function_name: str

    :param valid_functions: List of known/valid functions.

    :type valid_functions: list

    :returns: argparse.ArgumentParser Namespace -- The updated command-line
        arguments.
    """
    try:
        function_name = function_name.strip().lower()
    except AttributeError:
        raise gPhotonArgsError("Invalid function: {f}".format(f=function_name))

    # Idiom fix: "not x in y" -> "x not in y".
    if function_name not in valid_functions:
        raise gPhotonArgsError("{f} not in {vf}".format(f=function_name,
                                                        vf=valid_functions))

    # Band choices include "BOTH" only for gfind, which is also its default.
    parser.add_argument(
        "-b", "--band", action="store", type=lambda b: str.upper(str(b)),
        dest="band", help="Band designation",
        default=str(u"BOTH") if function_name == 'gfind' else str(u"NUV"),
        choices=[str(u"NUV"), str(u"FUV")]+(
            [str(u"BOTH")] if function_name == 'gfind' else []))
    parser.add_argument("-d", "--dec", action="store", type=float, dest="dec",
                        metavar="DEC",
                        help="Center Declination position in decimal degrees. "
                        "Must be 0 < DEC < 90.")
    parser.add_argument("--detsize", action="store", type=float,
                        dest="detsize", default=1.1,
                        help="Set the effective field diameter in degrees for"
                        " the exposure search. Default = 1.1.")
    parser.add_argument("-g", "--gap", "--maxgap", action="store", type=float,
                        dest="maxgap", default=1500.,
                        help="Maximum gap size in seconds for data to be"
                        " considered contiguous. Default = 1500.")
    parser.add_argument("--minexp", action="store", type=float, dest="minexp",
                        help="Minimum contiguous exposure in"
                        " seconds for data to be reported. Default = 1.",
                        default=1.)
    parser.add_argument("-r", "--ra", action="store", type=float, dest="ra",
                        help="Center Right Ascension position in decimal"
                        " degrees. Must be 0 < RA < 360.", metavar="RA")
    parser.add_argument("--retries", action="store", type=int, dest="retries",
                        help="Set the number of times to ping the server for a"
                        " response before defining a query failure. Default is"
                        " 20, set to a large number if you expect, or want to"
                        " allow, the query to take a long time without issuing"
                        " a failure.", default=20)
    parser.add_argument("--skypos", action="store", dest="skypos",
                        help="Alternate method for specifying sky position"
                        " with format '[RA,Dec]'", type=ast.literal_eval)
    parser.add_argument("--t0", "--tmin", action="store", type=float,
                        dest="tmin", help="Minimum date of observation to"
                        " consider (specify in GALEX time standard). Default"
                        " = 6e8", default=6.e8)
    # Typo fix in help text: "Maxium" -> "Maximum".
    parser.add_argument("--t1", "--tmax", action="store", type=float,
                        dest="tmax", help="Maximum date of observation to"
                        " consider (specify in GALEX time standard). Default"
                        " = 11e8", default=11.e8)
    parser.add_argument("--trange", "--tranges", action="store", dest="trange",
                        help="Time range(s) in which to limit the search, in"
                        " the format '[t0,t1]' or '[[t0_a,t1_a],[t0_b,t1_b]]'"
                        " (format in GALEX time).", type=ast.literal_eval)
    parser.add_argument("-v", "--verbose", action="store", type=int,
                        dest="verbose", help="Prints extra information to"
                        " STDOUT (higher number = more output). Choices are"
                        " {0,1,2,3}, default = 0.", default=0,
                        choices=[0, 1, 2, 3])
    # Typo fix in help text: "The includes recenting" ->
    # "This includes recentering".
    parser.add_argument("--suggest", action="store_true", dest="suggest",
                        help="Suggest reasonable parameters for aperture"
                        " photometry. This includes recentering on the nearest"
                        " MCAT source. This flag will clobber other annuli and"
                        " aperture radii parameters.", default=False)
    parser.add_argument("--skyrange", action="store", dest="skyrange",
                        type=ast.literal_eval, help="Two element list of ra"
                        " and dec ranges. Equivalent to separately setting"
                        " --raangle and decangle.")

    # Options shared by gAperture and gMap only.
    if function_name in ['gaperture', 'gmap']:
        parser.add_argument("--calpath", action="store", type=str,
                            dest="calpath",
                            default=os.pardir+os.sep+"cal"+os.sep,
                            help="Path to the directory that contains the"
                            " calibration files.")
        parser.add_argument("--coadd", action="store_true", dest="coadd",
                            help="Return the coadded flux (gAperture) or a"
                            " coadded image (gMap) over all requested time"
                            " ranges? Default = False.", default=False)
        parser.add_argument("--overwrite", "--ow", "--clobber",
                            action="store_true", dest="overwrite",
                            help="Overwrite existing output files? Default ="
                            " False.", default=False)
        parser.add_argument("-s", "--step", "--stepsz", "--frame",
                            action="store", type=float, dest="stepsz",
                            help="Step size for lightcurve or movie in"
                            " seconds. Default = 0. (no binning).", default=0.)

    return parser
def common_args(parser, function_name,
                valid_functions=['gaperture', 'gmap', 'gfind']):
    """
    Defines the arguments and options for the parser object when called from
    the command line.  Accepts a string used to determine which arguments to
    add to the Parser object. Valid function names are "gfind", "gaperture",
    or "gmap" (all case insensitive).

    :param parser: Command-line options to check and modify.

    :type parser: argparse.ArgumentParser Namespace

    :param function_name: Name of the function being called.

    :type function_name: str

    :param valid_functions: List of known/valid functions.

    :type valid_functions: list

    :returns: argparse.ArgumentParser Namespace -- The updated command-line
        arguments.
    """
    try:
        function_name = function_name.strip().lower()
    except AttributeError:
        raise gPhotonArgsError("Invalid function: {f}".format(f=function_name))

    # Idiom fix: "not x in y" -> "x not in y".
    if function_name not in valid_functions:
        raise gPhotonArgsError("{f} not in {vf}".format(f=function_name,
                                                        vf=valid_functions))

    # Band choices include "BOTH" only for gfind, which is also its default.
    parser.add_argument(
        "-b", "--band", action="store", type=lambda b: str.upper(str(b)),
        dest="band", help="Band designation",
        default=str(u"BOTH") if function_name == 'gfind' else str(u"NUV"),
        choices=[str(u"NUV"), str(u"FUV")] + (
            [str(u"BOTH")] if function_name == 'gfind' else []))
    parser.add_argument("-d", "--dec", action="store", type=float, dest="dec",
                        metavar="DEC",
                        help="Center Declination position in decimal degrees. "
                        "Must be 0 < DEC < 90.")
    parser.add_argument("--detsize", action="store", type=float,
                        dest="detsize", default=1.1,
                        help="Set the effective field diameter in degrees for"
                        " the exposure search. Default = 1.1.")
    parser.add_argument("-g", "--gap", "--maxgap", action="store", type=float,
                        dest="maxgap", default=1500.,
                        help="Maximum gap size in seconds for data to be"
                        " considered contiguous. Default = 1500.")
    parser.add_argument("--minexp", action="store", type=float, dest="minexp",
                        help="Minimum contiguous exposure in"
                        " seconds for data to be reported. Default = 1.",
                        default=1.)
    parser.add_argument("-r", "--ra", action="store", type=float, dest="ra",
                        help="Center Right Ascension position in decimal"
                        " degrees. Must be 0 < RA < 360.", metavar="RA")
    parser.add_argument("--retries", action="store", type=int, dest="retries",
                        help="Set the number of times to ping the server for a"
                        " response before defining a query failure. Default is"
                        " 20, set to a large number if you expect, or want to"
                        " allow, the query to take a long time without issuing"
                        " a failure.", default=20)
    parser.add_argument("--skypos", action="store", dest="skypos",
                        help="Alternate method for specifying sky position"
                        " with format '[RA,Dec]'", type=ast.literal_eval)
    parser.add_argument("--t0", "--tmin", action="store", type=float,
                        dest="tmin", help="Minimum date of observation to"
                        " consider (specify in GALEX time standard). Default"
                        " = 6e8", default=6.e8)
    # Typo fix in help text: "Maxium" -> "Maximum".
    parser.add_argument("--t1", "--tmax", action="store", type=float,
                        dest="tmax", help="Maximum date of observation to"
                        " consider (specify in GALEX time standard). Default"
                        " = 11e8", default=11.e8)
    parser.add_argument("--trange", "--tranges", action="store", dest="trange",
                        help="Time range(s) in which to limit the search, in"
                        " the format '[t0,t1]' or '[[t0_a,t1_a],[t0_b,t1_b]]'"
                        " (format in GALEX time).", type=ast.literal_eval)
    parser.add_argument("-v", "--verbose", action="store", type=int,
                        dest="verbose", help="Prints extra information to"
                        " STDOUT (higher number = more output). Choices are"
                        " {0,1,2,3}, default = 0.", default=0,
                        choices=[0, 1, 2, 3])
    # Typo fix in help text: "The includes recenting" ->
    # "This includes recentering".
    parser.add_argument("--suggest", action="store_true", dest="suggest",
                        help="Suggest reasonable parameters for aperture"
                        " photometry. This includes recentering on the nearest"
                        " MCAT source. This flag will clobber other annuli and"
                        " aperture radii parameters.", default=False)
    parser.add_argument("--skyrange", action="store", dest="skyrange",
                        type=ast.literal_eval, help="Two element list of ra"
                        " and dec ranges. Equivalent to separately setting"
                        " --raangle and decangle.")

    # Options shared by gAperture and gMap only.
    if function_name in ['gaperture', 'gmap']:
        parser.add_argument("--calpath", action="store", type=str,
                            dest="calpath",
                            default=os.pardir + os.sep + "cal" + os.sep,
                            help="Path to the directory that contains the"
                            " calibration files.")
        parser.add_argument("--coadd", action="store_true", dest="coadd",
                            help="Return the coadded flux (gAperture) or a"
                            " coadded image (gMap) over all requested time"
                            " ranges? Default = False.", default=False)
        parser.add_argument("--overwrite", "--ow", "--clobber",
                            action="store_true", dest="overwrite",
                            help="Overwrite existing output files? Default ="
                            " False.", default=False)
        parser.add_argument("-s", "--step", "--stepsz", "--frame",
                            action="store", type=float, dest="stepsz",
                            help="Step size for lightcurve or movie in"
                            " seconds. Default = 0. (no binning).", default=0.)

    return parser
def char_range(cls, c1, c2, prefix=""):
    """Yield uppercased strings ``prefix + <char>`` for each character
    from c1 to c2 inclusive.

    Note the whole string is uppercased, so a lowercase ``prefix`` is
    uppercased too (same behavior as the original).
    """
    # Bug fix: the original bound the loop variable to the name ``str``,
    # shadowing the builtin; renamed to ``label``.
    for code in range(ord(c1), ord(c2) + 1):
        label = "%s%s" % (prefix, chr(code))
        yield label.upper()