def share_post(sharing, db: Session):
    # Sharing a file with a user simply adds that user as a collaborator.
    # Once added, the collaborator can download, edit, and re-upload the file.
    user_email = sharing.collaborator_id
    filename = sharing.filename
    access_level = sharing.access_level
    try:
        file_id = get_file_id_by_filename(filename, db)
        collaborator_id = get_email_id(user_email, db).id
        file_lock_check = LockFile(file_dir + '/' + filename)
        if file_lock_check.is_locked():
            if is_owner(collaborator_id, file_id, db):
                file_lock_check.release()
            else:
                return None
        shared_status = Collaborators(collaborator_id=collaborator_id,
                                      file_id=file_id,
                                      access_level=access_level,
                                      access_date=dt.datetime.now())
        db.add(shared_status)
        db.commit()
        db.refresh(shared_status)
        return True
    except Exception:
        return False
def download_document(filename, email, db):
    # After downloading the file to the local server, update the
    # downloaded_on field of the Collaborators table for version checking.
    email_id = get_email_id(email, db).id
    file_id = get_file_id_by_filename(filename, db)
    print(email_id, file_id)
    # Create a lock object for the given file path.
    file_lock_check = LockFile(file_dir + '/' + filename)
    # is_locked() tells us whether the file is currently locked.
    print("the file status ", file_lock_check.is_locked())
    if file_lock_check.is_locked():
        if is_owner(email_id, file_id, db):
            # The current user is the owner, so release the lock.
            file_lock_check.release()
            print("owner file status ", file_lock_check.is_locked())
        else:
            # Otherwise return None, since the file is already locked.
            print("the file status ", file_lock_check.is_locked())
            return None
    try:
        print("downloading from here")
        update_status = db.query(Collaborators).filter(
            and_(Collaborators.collaborator_id == email_id,
                 Collaborators.file_id == file_id))
        update_status = update_status.update(
            {'downloaded_on': dt.datetime.now()})
        db.commit()
        print(update_status)
        return True
    except Exception:
        return False
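# A hedged sketch of the version check the downloaded_on comment above
# alludes to. is_copy_stale is a hypothetical helper, not the project's
# actual API; it assumes access_date is bumped whenever a collaborator
# uploads a new revision.
def is_copy_stale(email_id, file_id, db):
    # The collaborator's own row records when they last downloaded.
    own = db.query(Collaborators).filter(
        and_(Collaborators.collaborator_id == email_id,
             Collaborators.file_id == file_id)).first()
    if own is None or own.downloaded_on is None:
        return True
    # Stale if any collaborator touched the file after that download.
    latest = db.query(Collaborators).filter(
        Collaborators.file_id == file_id).order_by(
        Collaborators.access_date.desc()).first()
    return latest is not None and latest.access_date > own.downloaded_on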
def _get_lock_for(transcript_id):
    lock_dir_path = _get_visualization_dir_path(transcript_id)
    if not os.path.isdir(lock_dir_path):
        os.mkdir(lock_dir_path)
    return LockFile(lock_dir_path)
def check_sync(self):
    if not os.path.exists(COLLECTD_FILE):
        return

    lock = LockFile(COLLECTD_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            return

    with open(COLLECTD_FILE, "rb") as f:
        try:
            data = pickle.loads(f.read())
        except Exception:
            data = {}

    lock.release()

    alerts = []
    for k, v in list(data.items()):
        if k == "ctl-ha/disk_octets":
            title = "CTL HA link is actively used, check initiators connectivity"
        else:
            title = k

        if v["Severity"] == "WARNING":
            level = AlertLevel.WARNING
        else:
            level = AlertLevel.CRITICAL

        alerts.append(Alert(title, level=level))
    return alerts
def save_chunk_fs(data, queue_dir):
    """
    Save a chunk of data on the file system.

    Data will be serialized as MessagePack.
    """
    # todo: give option to set index manually
    try:
        ls = os.listdir(queue_dir)
    except OSError:
        if not os.path.exists(queue_dir):
            os.makedirs(queue_dir)
            ls = []
        else:
            return False
    mx = 0
    for s in ls:
        try:
            num = int(s)
        except ValueError:
            continue
        mx = max(mx, num)
    file_path = os.path.join(queue_dir, str(mx + 1))
    lock = LockFile(file_path)
    with lock, open(file_path, 'wb') as f:
        f.write(msgpack.dumps(data))
    return True
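# A companion sketch, not from the original source: read the oldest chunk
# back from the queue directory written by save_chunk_fs. load_chunk_fs is
# a hypothetical name; it assumes the same one-file-per-chunk layout with
# integer filenames and MessagePack serialization.
def load_chunk_fs(queue_dir):
    try:
        ls = os.listdir(queue_dir)
    except OSError:
        return None
    # Only integer-named entries are chunks, mirroring save_chunk_fs.
    nums = sorted(int(s) for s in ls if s.isdigit())
    if not nums:
        return None
    file_path = os.path.join(queue_dir, str(nums[0]))
    lock = LockFile(file_path)
    with lock, open(file_path, 'rb') as f:
        return msgpack.loads(f.read())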
def generateQueryAndQueryVectorMap(line_tmp):
    sentencevector = []
    # print "Number of Records Left:\t" + str(corpuscount - tmpcount)
    query = line_tmp.lower()
    component_word = query.split(' ')
    for one_word in component_word:
        if redis_handle.exists(one_word):
            vector_tmp = redis_handle.get(one_word)
            vector_final = normalize_redis_vector(vector_tmp)
            sentencevector.append(vector_final)
            # indexnum = vocab_dict.get(one_word)
            # sentencevector.append((repvector[indexnum]).tolist())
        else:
            sentencevector.append([float(0)] * vector_size)
    l = numpy.array(sentencevector)
    # Centroid calculation for each sentence: sum all word vectors
    # (column-wise) into a single 1-D list of size vector_size.
    lmt = numpy.array(l.sum(axis=0, dtype=numpy.float32)).tolist()
    if any(lmt):
        # Average the vectors over the number of words in the sentence.
        query_vector = [x / len(component_word) for x in lmt]
    else:
        query_vector = [float(0)] * vector_size
    filename = getRandomOutputFilename()
    lock = LockFile(filename)
    lock.acquire()
    # Write the query and its vector to the locked output file.
    fh = open(filename, 'w')
    fh.write(str(query) + "\t")
    for item in query_vector:
        fh.write("%s " % str(item))
    fh.close()
    lock.release()
def IoOperat_multi(tmpfile, mode, statisticArr, chunker):
    # tmpfile = "value_" + md5 + ".dat"
    with open(tmpfile, "rb") as f:
        fields = pickle.load(f)
        samples = pickle.load(f)
        headers = pickle.load(f)
        filepath_json = pickle.load(f)
    recordstring, infonum, infoSpecial = chunker2string(
        chunker, fields, samples, mode)
    if sys.platform.startswith('linux'):
        with open(filepath_json, "a") as fp:
            fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
            statisticArr[0] += chunker[1]
            statisticArr[1] += infonum
            statisticArr[2] += infoSpecial
            fp.write(recordstring)
    else:
        lock = LockFile(filepath_json)
        lock.acquire()
        with open(filepath_json, "a") as fp:
            statisticArr[0] += chunker[1]
            statisticArr[1] += infonum
            statisticArr[2] += infoSpecial
            fp.write(recordstring)
        lock.release()
    return
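# A hedged consolidation sketch, not part of the original module: the
# platform branch in IoOperat_multi can be folded into one context manager.
# locked_append is a hypothetical name; it keeps the same strategy of
# fcntl.flock on Linux and a LockFile elsewhere.
import contextlib

@contextlib.contextmanager
def locked_append(path):
    if sys.platform.startswith('linux'):
        with open(path, "a") as fp:
            # flock is released automatically when the file is closed.
            fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
            yield fp
    else:
        lock = LockFile(path)
        lock.acquire()
        try:
            with open(path, "a") as fp:
                yield fp
        finally:
            lock.release()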
def getLockFileLength():
    # Open the file only after the lock is held, so a concurrent writer
    # cannot slip in between open() and the read.
    lock = LockFile(lockFilePath)
    with lock:
        with open(lockFilePath, "r") as f:
            lines = f.readlines()
    return len(lines)
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    if LockFile is DummyLock:
        cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')

    base_dir = config.cache.makedir(cache_key)

    lockfile = join(six.text_type(base_dir), 'lock')
    cache_dir = join(six.text_type(base_dir), 'cache')

    lock = LockFile(lockfile)
    lock.acquire(timeout=timeout)
    try:
        # Clear cache dir contents if it was generated with a different
        # asv version.
        tag_fn = join(six.text_type(base_dir), 'tag.json')
        tag_content = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            try:
                if util.load_json(tag_fn) != tag_content:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)

        yield cache_dir

        util.write_json(tag_fn, tag_content)
    finally:
        lock.release()
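# Usage sketch (an assumption, not from the source): locked_cache_dir is a
# generator, so it is presumably decorated with contextlib.contextmanager
# at its definition site. A pytest caller might then write:
#
#     with locked_cache_dir(request.config, 'example-env') as cache_dir:
#         populate(cache_dir)  # hypothetical helper
#
# Concurrent pytest-xdist workers serialize on the lock file, so only one
# worker builds the cache while the others wait for it.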
def addScanResult(self,
                  scanResult,
                  ADD_MODE=NEW_SCAN_RESULT):
    lock = LockFile(self.m_TokenFileName)
    # Handle the case where the file never becomes unlocked.
    lock.acquire(SECONDS_WAIT_FOR_UNLOCK)
    f = open(self.m_TokenFileName, 'r+')
    listScanResult = self.loadScanResults(f)
    idToken = 0
    if ADD_MODE == TO_EXIST_SCAN_RESULT:
        listScanResult.setScanResultByIdToken(scanResult)
    else:
        idToken = listScanResult.addScanResult(scanResult)
    f.seek(0)
    f.write(listScanResult.toJSON())
    f.close()
    lock.release()
    return idToken
def add_shared_community_design(description, id, plate_type, pipet_type):
    """
    Add a plate-ID to the JSON list of shared designs.
    """
    try:
        if not valid_id(id):
            return
        if len(description) == 0:
            return
        lock = LockFile(community_public_file)
        with lock:
            data = []
            try:
                with open(community_public_file) as f:
                    data = json.load(f)
            except Exception:
                pass
            data.append({
                "description": description,
                "id": id,
                "plate_type": plate_type,
                "pipet_type": pipet_type
            })
            with open(community_public_file, "w") as f:
                json.dump(data, f)
    except Exception as e:
        # Silently ignore any errors; the new plate will simply not be
        # added to the list.
        sys.stderr.write(
            "failed to add shared community (id = '%s', exception='%s')"
            % (str(id), str(e)))
def start(self):
    '''
    Start point for pika consumer run
    '''
    # Initialize logging
    PikaConsumer.init_logging()
    PikaConsumer.logger.info("start : Initializing logging")
    # Initialize locking
    PikaConsumer.lock = LockFile(PikaConsumer.lockfile)
    if PikaConsumer.lock.is_locked():
        PikaConsumer.logger.error("start : Locked by another instance")
        print("Instance is locked by %s" % PikaConsumer.lockfile)
        return
    PikaConsumer.lock.acquire()
    PikaConsumer.logger.info("start : Instance locked")
    # Daemonize the process
    context = daemon.DaemonContext()
    consumer = PikaConsumer()
    with context:
        consumer.run()
    # We are not supposed to get here, since pika enters its listen loop.
    PikaConsumer.lock.release()
    PikaConsumer.logger.info("start : Instance unlocked")
def test_run_pack_lock_is_already_acquired(self):
    action = self.get_action_instance()
    temp_dir = hashlib.md5(PACK_INDEX['test']['repo_url']).hexdigest()

    original_acquire = LockFile.acquire

    def mock_acquire(self, timeout=None):
        original_acquire(self, timeout=0.1)

    LockFile.acquire = mock_acquire

    try:
        lock_file = LockFile('/tmp/%s' % (temp_dir))

        # Acquire a lock (file) so acquire inside download will fail
        with open(lock_file.lock_file, 'w') as fp:
            fp.write('')

        expected_msg = 'Timeout waiting to acquire lock for'
        self.assertRaisesRegexp(LockTimeout, expected_msg, action.run,
                                packs=['test'],
                                abs_repo_base=self.repo_base)
    finally:
        os.unlink(lock_file.lock_file)
        LockFile.acquire = original_acquire
def setup_lock(self):
    self.execution_timer = timeit.default_timer()
    try:
        self.lock = LockFile(self.lock_file)
        while not self.lock.i_am_locking():
            try:
                self.logger.debug("[Analog->Digital Converter 0x{:02X}] "
                                  "Acquiring Lock: {}".format(
                                      self.i2c_address, self.lock.path))
                self.lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception as e:
                self.logger.error("{cls} exception in read call. Error: "
                                  "{err}".format(cls=type(self).__name__,
                                                 err=e))
                self.logger.warning(
                    "[Analog->Digital Converter 0x{:02X}] Waited 60 "
                    "seconds. Breaking lock to acquire {}".format(
                        self.i2c_address, self.lock.path))
                self.lock.break_lock()
                self.lock.acquire()
        self.logger.debug(
            "[Analog->Digital Converter 0x{:02X}] Acquired Lock: "
            "{}".format(self.i2c_address, self.lock.path))
        self.logger.debug(
            "[Analog->Digital Converter 0x{:02X}] Executed in "
            "{}ms".format(self.i2c_address,
                          (timeit.default_timer() -
                           self.execution_timer) * 1000))
        return 1, "Success"
    except Exception as msg:
        return 0, "Analog->Digital Converter Fail: {}".format(msg)
def process(job):
    util.descriptor_correct(job)
    job_dir, in_dir, out_dir = logic.create_workdir(job)
    mounted_ids = []
    container_id = None
    try:
        logic.get_input_files(job, in_dir)
        with LockFile(config.LOCK_FILE):
            mounted_ids, container_id = logic.create_containers(
                job, in_dir, out_dir)
        while harbor.is_running(container_id):
            logger.debug("Container is running. Sleeping for {} sec.".format(
                config.CONTAINER_CHECK_INTERVAL))
            time.sleep(config.CONTAINER_CHECK_INTERVAL)
        logic.write_std_output(container_id, out_dir)
        logic.handle_output(job, out_dir)
        logger.debug("Setting job.status='completed'")
        job.status = Job.COMPLETED
    except Exception:
        capture_exception()
        traceback.print_exc()
        raise
    finally:
        logic.cleanup_dir(job_dir)
        cnt_to_remove = mounted_ids
        if container_id:
            cnt_to_remove += [container_id]
        logic.cleanup_containers(cnt_to_remove)
def run(self):
    alerts = []
    if not os.path.exists(SMART_FILE):
        return alerts

    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            return alerts

    with open(SMART_FILE, 'rb') as f:
        try:
            data = pickle.loads(f.read())
        except Exception:
            data = {}

    msg = ''
    for msgs in data.values():
        if not msgs:
            continue
        msg += '<br />\n'.join(msgs)

    if msg:
        alerts.append(Alert(Alert.CRIT, msg))

    lock.release()

    return alerts
def setup_lock(self, i2c_address, i2c_bus, lockfile):
    execution_timer = timeit.default_timer()
    try:
        self.lock[lockfile] = LockFile(lockfile)
        while not self.lock[lockfile].i_am_locking():
            try:
                self.logger.debug("[Locking bus-{} 0x{:02X}] Acquiring "
                                  "Lock: {}".format(
                                      i2c_bus, i2c_address,
                                      self.lock[lockfile].path))
                self.lock[lockfile].acquire(
                    timeout=60)  # wait up to 60 seconds
            except Exception as e:
                self.logger.error("{cls} raised an exception: "
                                  "{err}".format(cls=type(self).__name__,
                                                 err=e))
                self.logger.exception(
                    "[Locking bus-{} 0x{:02X}] Waited 60 "
                    "seconds. Breaking lock to acquire "
                    "{}".format(i2c_bus, i2c_address,
                                self.lock[lockfile].path))
                self.lock[lockfile].break_lock()
                self.lock[lockfile].acquire()
        self.logger.debug(
            "[Locking bus-{} 0x{:02X}] Acquired Lock: {}".format(
                i2c_bus, i2c_address, self.lock[lockfile].path))
        self.logger.debug(
            "[Locking bus-{} 0x{:02X}] Executed in {:.1f} ms".format(
                i2c_bus, i2c_address,
                (timeit.default_timer() - execution_timer) * 1000))
        return 1, "Success"
    except Exception as msg:
        return 0, "Multiplexer Fail: {}".format(msg)
def query(self, query_str):
    """ Send command and return reply """
    lock_file_amend = '{lf}.{dev}'.format(
        lf=ATLAS_PH_LOCK_FILE,
        dev=self.serial_device.replace("/", "-"))
    lock = LockFile(lock_file_amend)
    try:
        while not lock.i_am_locking():
            try:
                # wait up to 10 seconds before breaking the lock
                lock.acquire(timeout=10)
            except Exception as e:
                logger.exception(
                    "{cls} 10 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=lock_file_amend,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self.send_cmd(query_str)
        time.sleep(1.3)
        response = self.read_lines()
        lock.release()
        return response
    except Exception as err:
        logger.exception(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=err))
        lock.release()
        return None
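# The acquire-with-timeout-then-break pattern recurs in several snippets
# here. A minimal consolidation sketch, assuming the plain lockfile API;
# acquire_or_break is a hypothetical helper name.
def acquire_or_break(lock, timeout=10):
    """Acquire `lock`, breaking it if it appears stale after `timeout`."""
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=timeout)
        except Exception:
            # Assume the previous holder died and steal the lock.
            lock.break_lock()
            lock.acquire()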
def save_Y(pos, move, text_count):
    log_move = "Y:{}:{}".format(pos.get_board_piece(move[0]),
                                render(119 - move[1]))
    lock = LockFile("log_Y.txt")
    lock.acquire()
    with open("log_Y.txt", "a") as log_Y:
        log_Y.write(str(text_count) + " " + log_move + "\n")
    lock.release()
def read(self):
    """
    Takes a reading from the MH-Z19 and updates the self._co2 value

    :returns: None on success or 1 on error
    """
    if not self.serial_device:  # Don't measure if device isn't validated
        return None

    lock = LockFile(self.k30_lock_file)
    try:
        # Acquire lock to ensure more than one read isn't being
        # attempted at once.
        while not lock.i_am_locking():
            try:
                # wait 60 seconds before breaking the lock
                lock.acquire(timeout=60)
            except Exception as e:
                self.logger.error(
                    "{cls} 60 second timeout, {lock} lock broken: "
                    "{err}".format(cls=type(self).__name__,
                                   lock=self.k30_lock_file,
                                   err=e))
                lock.break_lock()
                lock.acquire()
        self._co2 = self.get_measurement()
        lock.release()
        if self._co2 is None:
            return 1
        return  # success - no errors
    except Exception as e:
        self.logger.error(
            "{cls} raised an exception when taking a reading: "
            "{err}".format(cls=type(self).__name__, err=e))
        lock.release()
        return 1
def getResult(resultNum):
    global process
    while True:
        lock = LockFile(resultPath)
        result = None
        with lock:
            with open(resultPath, "r") as f:
                lines = f.readlines()
        if len(lines) > resultNum:
            result = lines[len(lines) - 1]
            if result == 'True':
                return True
            elif result == "Alice":
                return 'Alice'
            elif result == "Eve":
                return 'Eve'
            elif result == "Others":
                return 'Others'
            else:
                return False
        elif process is not None and process.poll() is not None:
            return -1
        sleep(1)
def test_run_pack_lock_is_already_acquired_force_flag(self):
    # Lock is already acquired, but force is true, so it should be
    # deleted and released.
    action = self.get_action_instance()
    temp_dir = hashlib.md5(PACK_INDEX['test']['repo_url']).hexdigest()

    original_acquire = LockFile.acquire

    def mock_acquire(self, timeout=None):
        original_acquire(self, timeout=0.1)

    LockFile.acquire = mock_acquire

    try:
        lock_file = LockFile('/tmp/%s' % (temp_dir))

        # Acquire a lock (file) so acquire inside download will fail
        with open(lock_file.lock_file, 'w') as fp:
            fp.write('')

        result = action.run(packs=['test'], abs_repo_base=self.repo_base,
                            force=True)
    finally:
        LockFile.acquire = original_acquire

    self.assertEqual(result, {'test': 'Success.'})
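# A hedged aside, not from the original test suite: the manual
# save/patch/restore of LockFile.acquire in the two tests above could also
# use unittest.mock, which restores the original automatically:
#
#     from unittest import mock
#
#     def fast_acquire(self, timeout=None):
#         original_acquire(self, timeout=0.1)
#
#     with mock.patch.object(LockFile, 'acquire', fast_acquire):
#         ...  # exercise action.run() here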
def check_sync(self):
    try:
        with LockFile(VMWARELOGIN_FAILS):
            with open(VMWARELOGIN_FAILS, "rb") as f:
                fails = pickle.load(f)
    except Exception:
        return

    alerts = []
    for oid, errmsg in list(fails.items()):
        try:
            vmware = self.middleware.call_sync("datastore.query",
                                               "storage.vmwareplugin",
                                               [["id", "=", oid]],
                                               {"get": True})
        except IndexError:
            continue

        alerts.append(
            Alert("VMWare %(vmware)s failed to login to snapshot: %(err)s", {
                "vmware": vmware,
                "err": errmsg,
            }))

    return alerts
def main():
    global options
    parser = OptionParser()
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default=True,
                      help="don't print status messages to stdout")
    parser.add_option(
        "-i", "--input-dir",
        action="store", dest="input_dir",
        default='/build/post_build',
        help="Input directory that will contain the *_source.changes files")
    parser.add_option("-o", "--output-dir",
                      action="store", dest="output_dir",
                      default='/archive/getdeb/ubuntu',
                      help="Output directory")
    (options, args) = parser.parse_args()
    Log.verbose = options.verbose
    try:
        lock = LockFile("ftp_incoming")
    except LockFile.AlreadyLockedError:
        Log.log("Unable to acquire lock, exiting")
        return
    # Check and process the incoming directory
    check_post_build_dir()
def _lock_state_file(self):
    self._lock = LockFile(self.path)
    if (self._lock.is_locked() and
            (time() - getmtime(self._lock.lock_file)) > 10):
        self._lock.break_lock()
    self._lock.acquire()
def __init__(self, store, collection, package_id, path):
    self.store = store
    self.package_id = package_id
    self.path = path
    pkg_path = self.store._make_path(collection, package_id)
    self._abs_path = os.path.join(pkg_path, path)
    self._abs_dir = os.path.dirname(self._abs_path)
    self._lock = LockFile(self._abs_path)
def __init__(self, base_folder):
    self.base_folder = base_folder
    if not os.path.isdir(self.base_folder):
        # Derive the lock name from the folder path so concurrent
        # processes creating the same folder don't race on makedirs.
        with LockFile("_".join(os.path.split(self.base_folder))):
            os.makedirs(self.base_folder)
    self._cache = {}
def getResultNum():
    # Open the file only after the lock is held, matching getLockFileLength.
    lock = LockFile(resultPath)
    with lock:
        with open(resultPath, "r") as f:
            lines = f.readlines()
    resultNum = len(lines)
    return resultNum
def file_lock(filename):
    try:
        lock = LockFile(filename)
        lock.acquire(timeout=1)
    except Exception:
        print('lock failed')
        lock = None
    return lock
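# Usage sketch (an assumption about the caller, not from the source):
# file_lock returns None on failure, so callers must check before relying
# on the lock, and should release it when done:
#
#     lock = file_lock('/tmp/example.dat')
#     if lock is not None:
#         try:
#             pass  # guarded work goes here
#         finally:
#             lock.release()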
def main():
    lock = LockFile("/tmp/relay")
    with lock:
        ser = serial.Serial('/dev/ttyUSB0', 19200, timeout=1)
        ser.flushInput()
        ser.write(b't')
        s = ser.readline()
        print(s)