def vector_processing(data,params,**kwargs):
    """
    Returns a subset of the data based on provided inputs
    """
    # Inner functions for different processing options
    def val(F,n):
        return F[:,:n]
    def flow(F,n):
        return F[:,n:]
    def log_flow(F,n):
        return np.log10(flow(F,n))

    parser = KwargParser()
    parser.add('field','primal') # primal/dual
    parser.add('option','val')
    args = parser.parse(kwargs)

    if 'comp' == args['field']:
        data_field = data['primal'] * data['dual']
    else:
        data_field = data[args['field']]

    # Resolve the option name to one of the inner functions above
    fn = eval(args['option'])

    discretizer = params['discretizer']
    A = discretizer.get_num_actions()
    n = discretizer.get_num_nodes()

    return fn(data_field,n)
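# Illustrative sketch (not part of the original module): how the val/flow slicing
# above behaves, assuming each row stacks n value components followed by A*n flow
# components. The sizes below are hypothetical.
def _demo_vector_slicing():
    import numpy as np
    I, n, A = 4, 6, 3
    F = np.arange(I * (n + A * n), dtype=float).reshape(I, n + A * n)
    values = F[:, :n]   # per-node value components, shape (I, n)
    flows = F[:, n:]    # stacked per-action flow components, shape (I, A*n)
    return values.shape, flows.shape  # ((4, 6), (4, 18))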
def read_pickle(filename):
    """
    Read in information from a pickle file
    Should have the discretizer, at least
    """
    params = pickle.load(open(filename,'rb'))
    parser = KwargParser()
    parser.add('instance_builder')
    parser.add('solver_generator')
    parser.add('inst_conf_file')
    parser.add('solver_conf_file')
    parser.add('objects')
    params = parser.parse(params)
    return params
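# Illustrative sketch (not part of the original module): writing and reading a
# params pickle with the keys read_pickle expects, assuming the file simply holds
# a dict (the values here are placeholders).
def _demo_params_pickle():
    import pickle, tempfile, os
    params = {'instance_builder': None,
              'solver_generator': None,
              'inst_conf_file': 'instance.conf',
              'solver_conf_file': 'solver.conf',
              'objects': {}}
    (fd, path) = tempfile.mkstemp(suffix='.pickle')
    with os.fdopen(fd, 'wb') as fh:
        pickle.dump(params, fh)
    with open(path, 'rb') as fh:
        loaded = pickle.load(fh)
    os.remove(path)
    return sorted(loaded.keys())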
def final_frame(data,params,**kwargs):
    """
    Like frame processing, but only the final frame
    """
    # Inner functions for different processing options
    def value(F):
        return F[0,:,:]
    def flow(F):
        return np.argmax(F[1:,:,:],axis=0)
    def agg_flow(F):
        return np.sum(F[1:,:,:],axis=0)
    def log_agg_flow(F):
        return np.log10(np.sum(F[1:,:,:],axis=0))
    def adv(F):
        # Log gap between the best and second-best action flows per cell
        SF = np.sort(F[1:,:,:],axis=0)
        return np.log(SF[-1,:,:] - SF[-2,:,:] + 1e-22)

    parser = KwargParser()
    parser.add('field','primal') # primal/dual
    parser.add('option','value')
    args = parser.parse(kwargs)

    if 'comp' == args['field']:
        data_field = data['primal'] * data['dual']
    else:
        data_field = data[args['field']]

    # Resolve the option name to one of the inner functions above
    fn = eval(args['option'])

    discretizer = params['discretizer']
    A = discretizer.get_num_actions()
    n = discretizer.get_num_nodes()
    (x,y) = discretizer.get_basic_lengths()

    frames = split_into_frames(data_field,A,n,x,y)
    return fn(frames[-1,:,:,:])
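# Illustrative sketch (not part of the original module): the per-frame options above
# applied to a hypothetical final frame shaped (A+1, x, y), with the value image in
# slice 0 and one flow image per action.
def _demo_final_frame_options():
    import numpy as np
    rng = np.random.default_rng(0)
    frame = rng.uniform(0.1, 1.0, size=(1 + 3, 4, 5))  # A=3 actions on a 4x5 grid
    value = frame[0, :, :]                         # value image
    flow = np.argmax(frame[1:, :, :], axis=0)      # index of the largest flow per cell
    agg_flow = np.sum(frame[1:, :, :], axis=0)     # total flow per cell
    SF = np.sort(frame[1:, :, :], axis=0)
    adv = np.log(SF[-1, :, :] - SF[-2, :, :] + 1e-22)  # log gap, best vs. runner-up
    return value.shape, flow.shape, agg_flow.shape, adv.shape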
def time_series(data,params,**kwargs):
    """
    Reduces a field of the data to a one-dimensional time series
    """
    # Inner functions for different processing options
    def identity(x):
        return x
    def log(x):
        return np.log10(x)
    def log_vector_cond(X):
        """
        Assumes X > 0, and it's of the form

        (I,N) = X.shape

        where the rows are iteration numbers and columns are components of
        the solution vector for that iteration.

        The "vector condition number" is the ratio between the max and the
        min values of X. Log is taken because the condition numbers can
        blow up.
        """
        assert(2 == len(X.shape))
        assert(not np.any(X <= 0))
        (I,N) = X.shape
        ret = np.amax(X,axis=1) / np.amin(X,axis=1)
        assert((I,) == ret.shape)
        return np.log10(ret)

    parser = KwargParser()
    parser.add('field')
    parser.add('option','identity')
    args = parser.parse(kwargs)

    # Resolve the option name to one of the inner functions above
    fn = eval(args['option'])

    ret = fn(data[args['field']])
    assert(1 == len(ret.shape)) # Should be a vector
    return ret
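# Illustrative sketch (not part of the original module): the log "vector condition
# number" computed on a hypothetical strictly positive iterate matrix with 5
# iterations and 8 solution components.
def _demo_log_vector_cond():
    import numpy as np
    rng = np.random.default_rng(1)
    X = rng.uniform(0.5, 10.0, size=(5, 8))
    cond = np.amax(X, axis=1) / np.amin(X, axis=1)  # per-iteration max/min ratio
    return np.log10(cond)  # shape (5,); log10 since the ratio can blow up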
def frame_processing(data,params,**kwargs):
    """
    Turns data into movie frames based on provided inputs
    """
    # Inner functions for different processing options
    def value(F):
        return F[:,0,:,:]
    def flow(F):
        return np.argmax(F[:,1:,:,:],axis=1)
    def agg_flow(F):
        # Sum over the action axis (axis=1), matching final_frame's agg_flow
        return np.sum(F[:,1:,:,:],axis=1)
    def adv(F):
        # Log gap between the best and second-best action flows per cell
        SF = np.sort(F[:,1:,:,:],axis=1)
        return np.log(SF[:,-1,:,:] - SF[:,-2,:,:] + 1e-22)

    parser = KwargParser()
    parser.add('field','primal') # primal/dual
    parser.add('option','value')
    args = parser.parse(kwargs)

    if 'comp' == args['field']:
        data_field = data['primal'] * data['dual']
    else:
        data_field = data[args['field']]

    # Resolve the option name to one of the inner functions above
    fn = eval(args['option'])

    discretizer = params['discretizer']
    A = discretizer.get_num_actions()
    n = discretizer.get_num_nodes()
    (x,y) = discretizer.get_basic_lengths()

    frames = split_into_frames(data_field,A,n,x,y)
    return fn(frames)
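# Illustrative sketch (not part of the original module): a stand-in for
# split_into_frames, assuming (this is an assumption, not confirmed by the source)
# that each record flattens an (A+1, x, y) block of one value image plus A flow
# images, followed by the per-frame flow options above.
def _demo_frame_options():
    import numpy as np
    I, A, x, y = 3, 2, 4, 4
    flat = np.random.default_rng(2).uniform(size=(I, (A + 1) * x * y))
    frames = flat.reshape(I, A + 1, x, y)
    flow_movie = np.argmax(frames[:, 1:, :, :], axis=1)   # best action per cell, per frame
    agg_flow_movie = np.sum(frames[:, 1:, :, :], axis=1)  # total flow per cell, per frame
    return flow_movie.shape, agg_flow_movie.shape         # ((3, 4, 4), (3, 4, 4))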