def _execute(self, action):
    if not isiterable(action.target):
        action.target = [action.target]
    cmds = action.get_command()
    list_mon = []
    for target in action.target:
        node = self.experiment.topology.vs[target]
        # TODO check if we want something different
        if node['up'] == DOWN:
            break
        thread = self.run_node(target, cmds)
        # ip = MLCEnvironment._get_ip4(node['MLC_id'])
        # for cmd in cmds.split("\n"):
        #     thread = ssh(ip, cmd)
        #     thread.next()
        #     thread.next()
        # Obtain action pid:
        # Update read:
        thread.get_stdout()
        thread.execute(action.get_pid_cmd())
        action.pid = int(thread.proc.stdout.readline())
        # Monitors need to be multiplied for every command
        for monitor in action.monitors:
            aux_mon = copy.copy(monitor)
            self.monitors.append(aux_mon)
            list_mon.append(aux_mon)
            aux_mon.action = action
            aux_mon.target = target
            aux_mon.start(self)
    action.monitors = list_mon

def __set_header_or_footer(self, text_or_element, name='header', style=u"Header"):
    if name == 'header':
        header_or_footer = self.get_header()
    else:
        header_or_footer = self.get_footer()
    if header_or_footer is None:
        header_or_footer = odf_create_element('style:' + name)
        self.append(header_or_footer)
    else:
        header_or_footer.clear()
    if not isiterable(text_or_element):
        # Already a header or footer?
        if (isinstance(text_or_element, odf_element)
                and text_or_element.get_tag() == 'style:%s' % name):
            self.delete(header_or_footer)
            self.append(text_or_element)
            return
        text_or_element = [text_or_element]
    # FIXME cyclic import
    from paragraph import odf_create_paragraph
    for item in text_or_element:
        if type(item) is unicode:
            paragraph = odf_create_paragraph(item, style=style)
            header_or_footer.append(paragraph)
        elif isinstance(item, odf_element):
            header_or_footer.append(item)

def _stop_nodes(self, nodes):
    logging.debug('Stopping nodes')
    if not isiterable(nodes):
        nodes = [nodes]
    for node in nodes:
        n = self.experiment.topology.vs[node]
        run('mlc_loop --min %i -s' % n['MLC_id'])
        run('lxc-wait -n mlc%i -s STOPPED' % n['MLC_id'])
        n['up'] = DOWN

def set_header(self, text_or_element):
    if not isiterable(text_or_element):
        text_or_element = [text_or_element]
    # Remove existing header
    for element in self.get_elements('text:p'):
        self.delete(element)
    for paragraph in reversed(text_or_element):
        if type(paragraph) is unicode:
            paragraph = odf_create_paragraph(paragraph)
        self.insert(paragraph, FIRST_CHILD)

def _start_nodes(self, nodes):
    # Boot nodes
    if not isiterable(nodes):
        nodes = [nodes]
    for node in nodes:
        n = self.experiment.topology.vs[node]
        run('mlc_loop --min %i -b' % n['MLC_id'], async=False)
        n['up'] = BOOTING
    # Establish links
    t = self.experiment.topology
    for link in t.es():
        self._set_link(link.source, link.target)

def _filt2paths(self, filters):
    ports = filters.get("port") or [None]
    typeinfos = filters.get("etype") or [None]
    channels = filters.get("channel") or [None]
    units = filters.get("unit") or [None]
    paths = []
    if not isiterable(ports):
        ports = [ports]
    if not isiterable(typeinfos):
        typeinfos = [typeinfos]
    if not isiterable(channels):
        channels = [channels]
    if not isiterable(units):
        units = [units]
    for port in ports:
        for typeinfo in typeinfos:
            for channel in channels:
                for unit in units:
                    paths.append((port, typeinfo, channel, unit))
    return paths

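# The four nested loops in _filt2paths compute a Cartesian product; a
# hypothetical equivalent using the standard library, assuming the same
# already-normalised list inputs:
from itertools import product

def filt2paths_sketch(ports, typeinfos, channels, units):
    # Each path is one (port, etype, channel, unit) combination,
    # in the same order as the nested loops above.
    return list(product(ports, typeinfos, channels, units))

# filt2paths_sketch([1], [None], ['a', 'b'], [None])
# -> [(1, None, 'a', None), (1, None, 'b', None)]
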
def schedule(self, action, target, at, delay, inter_delay, command="", monitors=None):
    # Avoid the shared-mutable-default pitfall of `monitors=[]`
    if monitors is None:
        monitors = []
    elif not isiterable(monitors):
        monitors = [monitors]
    for monitor in monitors:
        monitor.set_filename(self.path, self.name)
        if target and not isiterable(target):
            monitor.add_tag(str(target))
            monitor.target = target
    if target is None:
        target = self.topology.vs.indices
    if at is None:
        at = self._last
    if inter_delay != delta(0) and isiterable(target):
        result = []
        for t in target:
            monitors_copy = copy.deepcopy(monitors)
            res = self.schedule(action, t, at, delay, inter_delay,
                                command, monitors_copy)
            result.append(res)
            # Subsequent targets start from the running `_last`,
            # spaced by inter_delay
            at = None
            delay = inter_delay
        return result
    else:
        if callable(delay):
            delay = delta(seconds=delay())
        at += delay
        if at > self._last:
            self._last = at
        entry = Action(action, at, target, command, monitors=monitors)
        self.actions.append(entry)
        return entry.get_id()

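# A toy illustration of the staggered timing in schedule() above
# (standalone and hypothetical; the real method goes through Action and
# the topology): the first target is offset by `delay`, each subsequent
# target by `inter_delay`, because every recursive call resets `at` to
# the running `_last` and swaps `delay` for `inter_delay`.
from datetime import timedelta as delta

def stagger_sketch(targets, at, delay, inter_delay):
    times, last = [], at
    for t in targets:
        last = last + delay      # first target uses `delay`
        delay = inter_delay      # the rest are spaced by `inter_delay`
        times.append((t, last))
    return times

# stagger_sketch([0, 1, 2], delta(0), delta(seconds=5), delta(seconds=2))
# -> [(0, 0:00:05), (1, 0:00:07), (2, 0:00:09)]
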
def set_text_box(self, text_or_element=None, text_style=None):
    text_box = self.get_text_box()
    if text_box is None:
        text_box = odf_create_element('draw:text-box')
        self.append(text_box)
    else:
        text_box.clear()
    if not isiterable(text_or_element):
        text_or_element = [text_or_element]
    for item in text_or_element:
        if isinstance(item, unicode):
            item = odf_create_paragraph(item, style=text_style)
        text_box.append(item)
    return text_box

def __init__(self, datamat=None, index=None):
    """
    Creates a new Datamat from an existing one.

    Parameters:
        datamat : instance of datamat.Datamat, optional
            If given, the existing Datamat is copied and only those
            fixations that are marked True in index are retained.
        index : list of True or False, or an iterable, same length as
            the fields of the Datamat
            Indicates which blocks should be used for the new Datamat
            and which should be ignored. If index is iterable, it
            indexes all fields as if you would index a numpy array with
            index. The only exception is that a datamat always holds
            arrays, never scalar values, as fields.

    TODO: thoroughly test that all indices work as expected (including
    slicing etc.)

    The indexing of a DataMat is achieved with the same semantics as
    the indexing of a numpy array. That is, fancy indexing returns a
    copy of the fields, whereas other types of indexing, such as
    slicing, will return views. This is potentially tricky because all
    parameters will always be copies.

    NB: because of this usage of the constructor to filter also, and
    because of the non-intuitive self object in Python, and because of
    Python's multiple inheritance, sub-classing Datamat is a royal PITA.
    """
    self._fields = []
    self._parameters = {}
    self._num_fix = 0
    if datamat is not None and index is not None:
        if not isiterable(index):
            index = [index]
        self._fields = datamat._fields[:]
        for field in self._fields:
            newfield = datamat.__dict__[field][index]
            num_fix = len(newfield)
            self.__dict__[field] = newfield
        self._parameters = datamat._parameters.copy()
        for (param, value) in datamat._parameters.iteritems():
            self.__dict__[param] = value
        self._num_fix = num_fix

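# The copy-vs-view semantics described in the docstring above, shown with
# plain NumPy (illustrative only):
import numpy as np

field = np.arange(5)
fancy = field[[0, 1, 2]]   # fancy indexing returns a copy
sliced = field[0:3]        # slicing returns a view
field[0] = 99
print(fancy[0])    # 0  -- the copy is unaffected
print(sliced[0])   # 99 -- the view reflects the change
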
def register(self, callback, data=None, filters=None):
    import copy
    from utils import isiterable
    if filters is None:
        filters = {}
    if "etype" in filters:
        old = filters.get("etype")
        if not isiterable(old):
            old = [old]
        # Normalise event classes to their names
        filters["etype"] = [e.__name__ if isinstance(e, type) else e
                            for e in old]
    with self.handlers_lock:
        cb_id = self.handlers_next_id
        self.handlers_next_id += 1
        self.handlers_by_id[cb_id] = (cb_id, callback, copy.copy(filters), data)
        paths = self._filt2paths(filters)
        for p in paths:
            self._savepath(p, cb_id)
    return cb_id

def __init__(self, categories=None, datamat=None, index=None):
    """
    Creates a new Datamat from an existing one.

    Parameters:
        categories : instance of stimuli.Categories, optional
            Allows direct access to image data via the Datamat.
        datamat : instance of datamat.Datamat, optional
            If given, the existing Datamat is copied and only those
            fixations that are marked True in index are retained.
        index : list of True or False, or an iterable, same length as
            the fields of the Datamat
            Indicates which blocks should be used for the new Datamat
            and which should be ignored. If index is iterable, it
            indexes all fields as if you would index a numpy array with
            index. The only exception is that a datamat always holds
            arrays, never scalar values, as fields.

    TODO: thoroughly test that all indices work as expected (including
    slicing etc.)
    """
    self._fields = []
    self._categories = categories
    self._parameters = {}
    self._num_fix = 0
    # warn('this needs to be thoroughly tested for indexes that are not
    # boolean NumPy arrays!')
    if datamat is not None and index is not None:
        # index = index.reshape(-1,).astype(bool)
        # assert index.shape[0] == datamat._num_fix, ("Index vector for "
        #     "filtering has to have the same length as the fields of the
        #     Datamat")
        # TODO: check this for slicing operations (fields will be views
        # rather than separate objects).
        if not isiterable(index):
            index = [index]
        self._fields = datamat._fields[:]
        for field in self._fields:
            newfield = datamat.__dict__[field][index]
            num_fix = len(newfield)
            self.__dict__[field] = newfield
        self._parameters = datamat._parameters.copy()
        for (param, value) in datamat._parameters.iteritems():
            self.__dict__[param] = value
        self._num_fix = num_fix

def flatten(dm):
    """
    Takes a DataMat whose elements are arrays and returns a flattened
    copy in which the DataMat element is the lowest atom of data: no
    DataMat element contains time-indexed fields; all the time points
    are directly, flatly, accessible.

    Makes the DataMat potentially extremely long, but eases merging,
    aligning, and maybe also analysis.
    """
    tmfields = dm.time_based_fields
    seqfields = []
    dbg(2, 'will flatten DataMat with %d elements.' % (len(dm)))
    # Step 1. Determine which fields need flattening.
    # TODO: a better test for the sequence fields is needed here.
    for f in dm.fieldnames():
        if (dm.__dict__[f].dtype == np.object) and isiterable(dm.__dict__[f][0]):
            seqfields += [f]
            dbg(3, "seqfield: %s, %s, %s" % (f, type(dm.__dict__[f][0]),
                                             dm.__dict__[f][0].dtype))
    # Step 2. Determine the amount of elements in the fields to be flattened.
    nelements = []
    for dmi in dm:
        elementn = [len(dmi.field(f)[0]) for f in seqfields]
        assert all_same(elementn)
        nelements += [elementn[0]]
    dbg(2, 'flattened DataMat will contain %d elements' % (sum(nelements)))
    newdm = dm.copy_empty()
    newdm._num_fix = sum(nelements)
    nonseqfields = set(seqfields).symmetric_difference(set(dm.fieldnames()))
    newdata = {}
    newmask = {}
    # Step 3. Create new, empty arrays for each of the non-sequence fields.
    for f in nonseqfields:
        dbg(3, "creating empty non-seq field '%s'" % (f))
        # To avoid problems with uninitialised values, use ma_nans instead of
        # ma.empty(sum(nelements), dtype=dm.field(f).dtype)
        if isiterable(dm.field(f)[0]):
            fdtype = np.object
        else:
            fdtype = dm.field(f).dtype
        newdata[f] = ma_nans(sum(nelements)).astype(fdtype)
    # Step 4. Expand all non-sequence fields into the new empty arrays.
    sidx = 0
    for idx, dmi in enumerate(dm):
        eidx = sidx + nelements[idx]
        dbg(4, '%d,%d' % (sidx, eidx))
        for f in nonseqfields:
            dbg(3, "element %d/%d: filling non-seq field '%s' [%d:%d] (%s)" % (
                idx, len(dm), f, sidx, eidx, str(dmi.field(f)[0])))
            if isiterable(dmi.field(f)[0]):
                for ii in xrange(sidx, eidx):
                    newdata[f][ii] = dmi.field(f)[0].astype(np.object)
            else:
                newdata[f][sidx:eidx] = dmi.field(f)[0]
        sidx = eidx
    # Step 5. Stack all the sequence fields together.
    for f in seqfields:
        dbg(3, "stacking sequence field '%s'" % (f))
        newdata[f] = np.hstack(dm.field(f))
        newmask[f] = np.hstack(np.ma.getmaskarray(dm.field(f)))
        dbg(4, 'newmask[%s]: %s' % (f, newmask[f]))
    warn('todo: set mask correctly')
    # Step 6. Create the new DataMat.
    for k, v in newdata.iteritems():
        newdm.add_field(k, v)
    return newdm  # newdata, newmask

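# Step 5's stacking in isolation (illustrative): np.hstack concatenates
# the per-element sequence arrays into one flat array, which is what
# makes the flattened DataMat "directly, flatly, accessible":
import numpy as np

seqs = np.empty(2, dtype=object)
seqs[0] = np.array([1.0, 2.0])
seqs[1] = np.array([3.0, 4.0, 5.0])
print(np.hstack(seqs))    # [1. 2. 3. 4. 5.]
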
def _sigmaclip_withaxis(self, data, axis=None, masked=True,
                        return_bounds=False, copy=True):
    """
    Sigma clip the data when ``axis`` is specified.

    In this case, we replace clipped values with NaNs as placeholder
    values.
    """
    # A float array type is needed to insert NaNs into the array
    filtered_data = data.astype(float)  # also makes a copy

    # Remove invalid values
    bad_mask = ~np.isfinite(filtered_data)
    if np.any(bad_mask):
        filtered_data[bad_mask] = np.nan

    # Remove masked values and convert to a plain ndarray
    if isinstance(filtered_data, np.ma.MaskedArray):
        filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
        filtered_data = filtered_data.filled(np.nan)

    # Convert negative axis/axes
    from utils import isiterable
    if not isiterable(axis):
        axis = (axis,)
    axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)

    # Define the shape of the min/max arrays so that they can be
    # broadcast with the data
    mshape = tuple(1 if dim in axis else size
                   for dim, size in enumerate(filtered_data.shape))

    nchanged = 1
    iteration = 0
    while nchanged != 0 and (iteration < self.maxiters):
        iteration += 1
        n_nan = np.count_nonzero(np.isnan(filtered_data))

        self._compute_bounds(filtered_data, axis=axis)
        if not np.isscalar(self._min_value):
            self._min_value = self._min_value.reshape(mshape)
            self._max_value = self._max_value.reshape(mshape)

        with np.errstate(invalid='ignore'):
            filtered_data[(filtered_data < self._min_value) |
                          (filtered_data > self._max_value)] = np.nan

        nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data))

    self._niterations = iteration

    if masked:
        # Create an output masked array
        if copy:
            filtered_data = np.ma.masked_invalid(filtered_data)
        else:
            # Ignore RuntimeWarnings for comparisons with NaN data values
            with np.errstate(invalid='ignore'):
                out = np.ma.masked_invalid(data, copy=False)
                filtered_data = np.ma.masked_where(
                    np.logical_or(out < self._min_value,
                                  out > self._max_value),
                    out, copy=False)

    if return_bounds:
        return filtered_data, self._min_value, self._max_value
    else:
        return filtered_data

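# A minimal, self-contained sketch of the iterative clipping loop above,
# in plain NumPy (the class machinery -- self._compute_bounds, maxiters,
# the masking options -- is assumed away; all names here are hypothetical):
import numpy as np

def sigma_clip_sketch(values, sigma=3.0, maxiters=5):
    data = np.array(values, dtype=float)  # work on a float copy
    for _ in range(maxiters):
        mean, std = np.nanmean(data), np.nanstd(data)
        with np.errstate(invalid='ignore'):
            outliers = ((data < mean - sigma * std) |
                        (data > mean + sigma * std))
        if not outliers.any():
            break
        data[outliers] = np.nan   # NaN as placeholder, as in the method above
    return np.ma.masked_invalid(data)

# sigma_clip_sketch([1., 2., 2., 1., 100.], sigma=1.0) masks the 100.
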
def clean(self):
    cleaned_data = super(publisher_form, self).clean()
    cleaned_data['person_name'] = self.clean_person_name(
        cleaned_data['person_name'])
    # Replace URL entities, then collapse double spaces to single spaces
    cleaned_data['candidate'] = cleaned_data['candidate'].replace("%20", ' ')
    cleaned_data['candidate'] = cleaned_data['candidate'].replace("%25", ' ')
    cleaned_data['candidate'] = cleaned_data['candidate'].replace("  ", ' ')
    if cleaned_data['website'] != '':
        # When empty, it's an empty string; we want it to be empty --
        # it's a honeypot field.
        raise exceptions.ValidationError(
            'Thanks for filling in the website field!')
    if 'candidate_id' not in self.cleaned_data:
        candidate_id = None
    else:
        candidate_id = self.cleaned_data['candidate_id']
    cleaned_data['user_object'] = utils.find_or_make_user(
        cleaned_data['person_name'], self.request_ip, cleaned_data['state'])
    if candidate_id:
        # A candidate_id was provided
        try:
            cleaned_data['candidate'] = candidate.objects.get(pk=candidate_id)
        except candidate.DoesNotExist:
            try:
                cleaned_data['candidate'] = utils.find_or_make_candidate(
                    cleaned_data['candidate'], cleaned_data['user_object'],
                    exact=True)
            except Exception:
                raise exceptions.ValidationError(
                    'Candidate ID was set, but does not correspond to an '
                    'actual candidate')
    else:
        # OK, now make it do the magic of finding a candidate by name
        if cleaned_data['candidate']:
            try:
                # TODO: This should be a single candidate; in some cases,
                # it's returning more than one.
                cleaned_data['candidate'] = utils.find_or_make_candidate(
                    cleaned_data['candidate'], cleaned_data['user_object'],
                    exact=True)
            except Exception:
                raise exceptions.ValidationError(
                    "Problem looking up candidate. We hope this is "
                    "temporary. Would you care to try again?")
        else:
            raise exceptions.ValidationError(
                "No candidate provided. Who are you planning on voting for?")
    if utils.isiterable(cleaned_data['candidate']):
        # If the lookup returned multiple candidates, just take the first
        cleaned_data['candidate'] = cleaned_data['candidate'][0]
    return cleaned_data

def modify_command(self, action_ids, new_command):
    if not isiterable(action_ids):
        action_ids = [action_ids]
    for action in self.actions:
        if action.get_id() in action_ids:
            action.modify(new_command)

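# Every snippet above leans on an `isiterable` helper that is never shown.
# A minimal sketch of such a helper, assuming the common "iterable, but
# strings count as scalars" convention -- the real implementations in
# these codebases may differ:
def isiterable(obj):
    # Strings are iterable but are almost always meant as scalars here
    # (basestring under Python 2; use str under Python 3).
    if isinstance(obj, basestring):
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True

# isiterable([1, 2]) -> True; isiterable(3) -> False; isiterable(u"abc") -> False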