def delDomainList(self, args):
    """Batch-delete the websites listed in ``args.siteList``.

    Parameters
    ----------
    args : object
        Request-like object whose ``siteList`` attribute is a JSON array of
        site dicts, each with ``id``, ``name`` and ``path`` keys.

    Returns
    -------
    dict
        ``{"status": "success", "site": {...}}`` with total, success and
        failure counts of the delete operations.
    """
    sites = json.loads(args.siteList)
    succeeded = []
    failed = []
    for site in sites:
        site_obj = dict_obj()
        site_obj.id = site["id"]
        site_obj.webname = site["name"]
        site_obj.path = site["path"]
        # Always request deletion of the associated database and FTP
        # account as well ('1' flags) — set directly on the request object
        # instead of mutating the caller-supplied dict first.
        site_obj.database = '1'
        site_obj.ftp = '1'
        # NOTE: the original reassigned the loop source `data` here, shadowing
        # the list being iterated; use a distinct name for the result.
        result = BT_SITE.DeleteSite(site_obj)
        if result['status'] == True:
            succeeded.append(result.copy())
        elif result['status'] == False:
            failed.append(result.copy())
    return {
        "status": "success",
        "site": {
            "count": len(succeeded) + len(failed),
            "successSize": len(succeeded),
            "failureSize": len(failed),
        },
    }
def cachedCodeCopy(codeDoc, linedelta=None):
    """Return a copy of *codeDoc* with every ``lineno`` shifted by *linedelta*.

    Parameters
    ----------
    codeDoc : Doc
        Document tree to copy; copied via ``data.copy``.
    linedelta : int or None
        Amount to add to each node's ``lineno``. ``None`` means no shift.

    Returns
    -------
    Doc
        The shifted copy; *codeDoc* itself is left untouched.
    """
    # Bug fix: the original left linedelta as None, so `x.lineno += linedelta`
    # raised TypeError (int + None) whenever the default was used.
    if linedelta is None:
        linedelta = 0
    res = data.copy(codeDoc)

    def applyLineDelta(x):
        # Shift this node, then recurse into Doc children (both property
        # values and content).
        if hasattr(x, "lineno"):
            x.lineno += linedelta
        if isinstance(x, Doc):
            for subx in x.propertyValues():
                applyLineDelta(subx)
            for subx in x.content():
                applyLineDelta(subx)

    applyLineDelta(res)
    return res
def setBlockCache(self, blocktext, lines, lineno):
    """Store parsed *lines* for *blocktext* in the block cache.

    Caches a deep copy of *lines* keyed by the block's text, then evicts
    the oldest cached blocks (FIFO via ``_blockCacheUsed``) until the
    total cached text size fits under ``LanguageImpl.maxCacheSize``.
    """
    textid = id(blocktext)
    # Copy each line so later mutation of the live document does not
    # corrupt the cached snapshot.
    copy = [data.copy(line) for line in lines]
    self._blockCache[blocktext] = (copy, lineno, textid)
    self._blockCacheUsed.append(textid)  # FIFO eviction order
    self._blockCacheSize += len(blocktext)
    # {{{ remove old caches if cache is too big
    while self._blockCacheSize > LanguageImpl.maxCacheSize:
        delid = self._blockCacheUsed[0]  # oldest entry's text id
        # Linear scan for the entry with this id; `break` right after the
        # delete keeps the mutation-during-iteration safe.
        # NOTE(review): the loop targets shadow the `lines`/`lineno`
        # parameters — harmless here since both were consumed above.
        for text, (lines, lineno, textid) in self._blockCache.items():
            if textid == delid:
                del self._blockCache[text]
                del self._blockCacheUsed[0]
                self._blockCacheSize -= len(text)
                break
        else:
            # Cache bookkeeping out of sync — should be impossible.
            assert 0, "id not in _blockCache"
def collect_diff_mask_meta(self, label=None, filename=None, save=False,
                           dtype=None, **kwargs):
    """
    Attempt to save diffraction data.

    PARAMETERS
    -----------

    label : str
            ptypy label of the scan to save
            if None, tries to save ALL diffraction data

    filename : str
            override the file path to write to
            will change `data_filename` in `scan_info` dict

    save : bool
            if True (and this is the master process), write the collected
            dict to an '.h5' file and return the filename instead.

    dtype : numpy dtype or None
            optional dtype to cast the 'data' array to (saves space).

    all other kwargs are added to 'scan_info' key in the '.h5' file
    """
    if label is None:
        # No label: recurse over every known scan and collect per-label.
        scans = {}
        for l in self.scans.keys():
            scans[l] = self.collect_diff_mask_meta(l, filename, save,
                                                   dtype, **kwargs)
        return scans
    else:
        dct = {}
        # get the scan
        scan = self.scans[label]
        for kind in ['mask', 'diff']:
            storage = scan[kind]
            # fresh copy (frames owned by other ranks appear as None here)
            new = [
                data.copy() if data is not None else None
                for data in storage.datalist
            ]
            Nframes = len(new)
            if parallel.MPIenabled:
                logger.info('Using MPI to gather arrays for storing %s'
                            % kind)
                # Frame-by-frame gather onto the master rank. The per-frame
                # barriers keep master's receive paired with exactly one
                # worker's send for each index i.
                for i in range(Nframes):
                    if parallel.master:
                        # Root receives the data if it doesn't have it yet
                        if new[i] is None:
                            new[i] = parallel.receive()
                        logger.info(
                            '%s :: Frame %d/%d received at process %d'
                            % (kind.upper(), i, Nframes, parallel.rank),
                            extra={'allprocesses': True})
                        parallel.barrier()
                    else:
                        if new[i] is not None:
                            # Send data to root.
                            parallel.send(new[i])
                            #logger.info('Process %d - Send frame %d of %s' % (parallel.rank,i,kind), extra={'allprocesses':True})
                            sender = parallel.rank
                            logger.info(
                                '%s :: Frame %d/%d send from process %d'
                                % (kind.upper(), i, Nframes, parallel.rank),
                                extra={'allprocesses': True})
                        parallel.barrier()
                # Final barrier: all ranks sync before master assembles.
                parallel.barrier()
            # storing as arrays (master only; workers keep dct sparse)
            if parallel.master:
                key = 'data' if kind == 'diff' else kind
                dct[key] = np.asarray(new)
        # save if you are master
        if parallel.master:
            # get meta data
            meta = self.scans[label]['meta']
            # update with geometric info
            meta.update(scan.pars.geometry.copy())
            # translate to scan_info and ditch variables not in
            # data.DEFAULT_scan_info
            from data import MT as LeTraducteur
            dct['scan_info'] = LeTraducteur.as_scan_info(
                self.scans[label]['meta'])
            # overwrite filename
            if filename is not None:
                dct['scan_info']['data_filename'] = filename
            filename = dct['scan_info'].get('data_filename')
            # add other kwargs to scan_info
            dct['scan_info'].update(kwargs)
            dct['scan_info']['shape'] = dct['data'].shape
            # switch data type for data if wanted (saves space)
            if dtype is not None:
                dct['data'] = dct['data'].astype(dtype)
            if save:
                # cropping
                from .. import io
                filename = u.clean_path(filename)
                logger.info('Saving to ' + filename)
                io.h5write(filename, dct)
                logger.info('Saved')
                return filename
        return dct
def collect_diff_mask_meta(self,label=None,filename =None,save=False,dtype=None,**kwargs):
    """
    Attempt to save diffraction data.

    NOTE(review): this appears to be a second copy of the same method found
    earlier in this file (differing only in whitespace) — confirm whether
    one of the two definitions should be removed.

    PARAMETERS
    -----------

    label : str
            ptypy label of the scan to save
            if None, tries to save ALL diffraction data

    filename : str
            override the file path to write to
            will change `data_filename` in `scan_info` dict

    all other kwargs are added to 'scan_info' key in the '.h5' file
    """
    if label is None:
        # No label given: recurse over all scans.
        scans={}
        for l in self.scans.keys():
            scans[l]=self.collect_diff_mask_meta(l,filename,save,dtype,**kwargs)
        return scans
    else:
        dct ={}
        # get the scan
        scan = self.scans[label]
        for kind in ['mask','diff']:
            storage=scan[kind]
            # fresh copy; frames not owned by this rank are None
            new = [data.copy() if data is not None else None for data in storage.datalist]
            Nframes = len(new)
            if parallel.MPIenabled:
                logger.info('Using MPI to gather arrays for storing %s' % kind)
                # Gather each frame onto master; per-frame barriers pair
                # each receive with exactly one send.
                for i in range(Nframes):
                    if parallel.master:
                        # Root receives the data if it doesn't have it yet
                        if new[i] is None:
                            new[i] = parallel.receive()
                        logger.info('%s :: Frame %d/%d received at process %d' %(kind.upper(),i,Nframes,parallel.rank) , extra={'allprocesses':True})
                        parallel.barrier()
                    else:
                        if new[i] is not None:
                            # Send data to root.
                            parallel.send(new[i])
                            #logger.info('Process %d - Send frame %d of %s' % (parallel.rank,i,kind), extra={'allprocesses':True})
                            sender=parallel.rank
                            logger.info('%s :: Frame %d/%d send from process %d' %(kind.upper(),i,Nframes,parallel.rank) , extra={'allprocesses':True})
                        parallel.barrier()
                # Final sync before master assembles the arrays.
                parallel.barrier()
            # storing as arrays (master only)
            if parallel.master:
                key = 'data' if kind=='diff' else kind
                dct[key] = np.asarray(new)
        # save if you are master
        if parallel.master:
            # get meta data
            meta = self.scans[label]['meta']
            # update with geometric info
            meta.update(scan.pars.geometry.copy())
            # translate to scan_info and ditch variables not in data.DEFAULT_scan_info
            from data import MT as LeTraducteur
            dct['scan_info'] = LeTraducteur.as_scan_info(self.scans[label]['meta'])
            # overwrite filename
            if filename is not None:
                dct['scan_info']['data_filename']= filename
            filename = dct['scan_info'].get('data_filename')
            # add other kwargs to scan_info
            dct['scan_info'].update(kwargs)
            dct['scan_info']['shape']=dct['data'].shape
            # switch data type for data if wanted (saves space)
            if dtype is not None:
                dct['data']=dct['data'].astype(dtype)
            if save:
                # cropping
                from .. import io
                filename = u.clean_path(filename)
                logger.info('Saving to ' +filename)
                io.h5write(filename,dct)
                logger.info('Saved')
                return filename
        return dct
def __cb_buttonpress(self, widget, data):
    """Callback for handling mouse clicks.

    Dispatches on the button and event type carried by *data* (a GTK
    button-press event): clears the selection on clicks outside any row,
    expands/emits "doubleclick" on left double-click, emits "popup" on
    right-click, and sets up drag handlers for multi-row drags.
    Returns True when the event should not propagate further.
    """
    path = self.get_path_at_pos(int(data.x), int(data.y))
    # handle click outside entry
    if path is None:
        self.unselect_all()
    # handle doubleclick
    if data.button == 1 and data.type == gtk.gdk._2BUTTON_PRESS and path != None:
        iter = self.model.get_iter(path[0])
        self.toggle_expanded(iter)
        if iter != None:
            self.emit("doubleclick", iter)
    # display popup on right-click
    elif data.button == 3:
        # Move the cursor only when the clicked row is not already part of
        # the current selection (preserves multi-selections).
        if path != None and self.selection.iter_is_selected(self.model.get_iter(path[0])) == False:
            self.set_cursor(path[0], path[1], False)
        self.emit("popup", data)
        return True
    # handle drag-and-drop of multiple rows
    elif self.__cbid_drag_motion == None and data.button in ( 1, 2 ) and data.type == gtk.gdk.BUTTON_PRESS and path != None and self.selection.iter_is_selected(self.model.get_iter(path[0])) == True and len(self.get_selected()) > 1:
        # Connect one-shot motion/release handlers; event data is copied
        # because the original event is freed after this callback returns.
        self.__cbid_drag_motion = self.connect("motion_notify_event", self.__cb_drag_motion, data.copy() )
        self.__cbid_drag_end = self.connect("button_release_event", self.__cb_button_release, data.copy() )
        return True
def addToITunes(self):
    """
    Begin the conversion process of all the valid items in the queue.

    Collects valid queue entries into self.validRecordings, opens each
    audio file with audiotools (building PCM readers for non-ALAC files),
    then launches a ConvertFilesThread with a modal progress dialog.
    Aborts with a message box and returns early on any file error.
    """
    self.validRecordings = []
    """A list containing the values from self.queueItemData, but only for recordings with all the required metadata"""
    self.currentRecording = 0
    """The index of self.validRecordings that is currently being converted and copied to iTunes"""
    self.currentTrack = 0
    """The track from the current recording currently being processed"""
    if not hasattr(self, 'antiCrashBin'):
        self.antiCrashBin = []
        """For reasons unknown, Windows 7 crashes when PCMReader objects go out of scope.

        My inelegant solution is to keep those objects in this antiCrashBin so that
        if the user converts multiple batches in one session the program won't crash.
        """
    # Count all tracks for the progress bar and load recording
    # data into self.validRecordings in the order that the items
    # appear in the queue.
    self.trackCount = 0
    # Pre-size with None placeholders so items can be slotted in by
    # queue-row index below.
    [self.validRecordings.append(None) for x in range(len(self.queueItemData))]
    for dir, data in self.queueItemData.iteritems():
        if data['valid'] == True:
            rowForItemInQueue = self.queueListWidget.row(data['item'])
            self.validRecordings[rowForItemInQueue] = data.copy()
            self.trackCount += len(data['metadata']['tracklist'])
    # Drop the placeholders left by invalid queue entries.
    [self.validRecordings.remove(None) for x in range(self.validRecordings.count(None))]
    self.failedTracks = []
    if len(self.validRecordings) == 0:
        MessageBox.warning(self, 'Notice', 'Nothing to add')
        return

    # Prepare a list of PcmReader objects
    for validRecording in self.validRecordings:
        validRecording['pcmReaders'] = []
        audioFiles = validRecording['metadata']['audioFiles']
        for index, audioFile in enumerate(audioFiles):
            try:
                encoding = sys.getfilesystemencoding()
                audiofileObj = audiotools.open(audioFile.encode(encoding))
            except audiotools.UnsupportedFile:
                MessageBox.critical(
                    self,
                    'Error opening file',
                    '%s is an unsupported type' % \
                    os.path.basename(audioFile)
                )
                return
            except IOError as e:
                MessageBox.critical(
                    self,
                    'Error opening file',
                    'Could not open file %s <br /><br /> %s ' % \
                    (os.path.basename(audioFile), e.args[1])
                )
                return
            except UnicodeDecodeError as e:
                MessageBox.critical(
                    self,
                    'Error opening file',
                    'Unicode decode error <br /><br /> %s' % e.args[1])
                return
            # If ALAC already, set the reader to None.
            if isinstance(audiofileObj, audiotools.ALACAudio):
                validRecording['pcmReaders'].append(None)
            else:
                try:
                    pcmReader = audiofileObj.to_pcm()
                except Exception as e:
                    MessageBox.critical(
                        self,
                        'Error reading file',
                        'Could not read file: %s <br /><br /> %s' % \
                        (audioFile, str(e))
                    )
                    return
                if isinstance(pcmReader, audiotools.PCMReaderError):
                    MessageBox.critical(
                        self,
                        'Error reading file',
                        'Could not read file ' + os.path.basename(audioFile)
                        + "<br /><br />" + pcmReader.error_message)
                    return
                validRecording['pcmReaders'].append(pcmReader)
                # Keep a reference alive for the whole session (see
                # antiCrashBin note above).
                self.antiCrashBin.append(pcmReader)

    # Build the modal progress dialog and kick off the worker thread.
    self.progressBarLabel = progressBarLabel = QLabel()
    self.progressDialog = progressDialog = QProgressDialog(
        "Loading", "Cancel", 1, self.trackCount + 1, self)
    self.connect(self.progressDialog, SIGNAL("canceled()"), self.cancelProcess)
    progressDialog.setWindowTitle('BootTunes')
    progressDialog.setWindowFlags(Qt.Window | Qt.WindowCloseButtonHint)
    progressDialog.setWindowModality(Qt.WindowModal)
    progressDialog.setLabel(progressBarLabel)
    progressBarLabel.setText(
        'Converting "%s"' % \
        self.validRecordings[0]['metadata']['tracklist'][0]
    )
    progressDialog.setValue(1)
    self.lock = QReadWriteLock()
    self.processThread = ConvertFilesThread(self.lock, self)
    self.connect(self.processThread, SIGNAL("progress(int, QString)"),
                 self.updateProgress)
    self.connect(self.processThread, SIGNAL("success()"),
                 self.conversionComplete)
    self.connect(self.processThread, SIGNAL("error(QString)"),
                 self.errorInThread)
    self.processThread.start()