Example 1
def generateQueryAndQueryVectorMap(line_tmp):
    sentencevector = []
    # print "Number of Records Left:\t" + str(corpuscount - tmpcount)
    query = line_tmp.lower()
    component_word = query.split(' ')
    for one_word in component_word:
        if redis_handle.exists(one_word):
            vector_tmp = redis_handle.get(one_word)
            vector_final = normalize_redis_vector(vector_tmp)
            sentencevector.append(vector_final)
            #indexnum = vocab_dict.get(one_word)
            #sentencevector.append((repvector[indexnum]).tolist())
        else:
            sentencevector.append([float(0)] * vector_size)
    sentence_matrix = numpy.array(sentencevector)
    # Centroid calculation for each sentence:
    # sums all word vectors column-wise into a single (1D) list of size vector_size.
    centroid = numpy.array(sentence_matrix.sum(axis=0, dtype=numpy.float32)).tolist()

    if any(centroid):
        # Average the summed vector over the number of words in the sentence.
        query_vector = [x / len(component_word) for x in centroid]
    else:
        query_vector = [float(0)] * vector_size

    filename = getRandomOutputFilename()
    lock = LockFile(filename)
    lock.acquire()
    # Open a file handle to the lock file
    fh = open(filename, 'w')
    fh.write(str(query)+"\t")
    for item in query_vector:
        fh.write("%s " % str(item))
    fh.close()
    lock.release()
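A minimal, self-contained illustration of the centroid averaging above, using two hypothetical 3-dimensional word vectors in place of the Redis lookups:

import numpy

# Hypothetical vectors for a two-word query (vector_size == 3).
sentencevector = [[1.0, 2.0, 3.0], [3.0, 0.0, 1.0]]
summed = numpy.array(sentencevector).sum(axis=0, dtype=numpy.float32).tolist()
# summed == [4.0, 2.0, 4.0]; dividing by the word count yields the centroid.
query_vector = [x / len(sentencevector) for x in summed]
# query_vector == [2.0, 1.0, 2.0]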
Example 2
	def on_post(self, req, resp, id):
		try:
			user = req.context['user']

			# Check that the task exists
			task = session.query(model.Task).get(id)
			if task is None:
				req.context['result'] = 'Neexistujici uloha'
				resp.status = falcon.HTTP_404
				return

			# Check that git_branch and git_path are set
			if (task.git_path is None) or (task.git_branch is None):
				req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
				resp.status = falcon.HTTP_400
				return

			if task.git_branch == "master":
				req.context['result'] = 'Uloha je jiz ve vetvi master'
				resp.status = falcon.HTTP_400
				return

			wave = session.query(model.Wave).get(task.wave)

			# Only administrators and the wave's guarantor may merge
			if (not user.is_logged_in()) or ((not user.is_admin()) and (user.id != wave.garant)):
				req.context['result'] = 'Nedostatecna opravneni'
				resp.status = falcon.HTTP_400
				return

			# Check the git lock
			lock = util.lock.git_locked()
			if lock:
				req.context['result'] = 'GIT uzamcen zámkem '+lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
				resp.status = falcon.HTTP_409
				return

			# Construct and acquire the lock before the try block so that a
			# failed acquire cannot trigger release() on an unheld lock.
			mergeLock = LockFile(util.admin.taskMerge.LOCKFILE)
			mergeLock.acquire(60) # Lock timeout is 1 minute
			try:

				# Fetch the repository
				repo = git.Repo(util.git.GIT_SEMINAR_PATH)

				if task.git_branch in repo.heads:
					# Cannot delete branch we are on
					repo.git.checkout("master")
					repo.git.branch('-D', task.git_branch)

				task.git_branch = 'master'

				session.commit()
				resp.status = falcon.HTTP_200
			finally:
				mergeLock.release()
		except SQLAlchemyError:
			session.rollback()
			raise
		finally:
			session.close()
Example 3
def share_post(sharing, db: Session):

    # Sharing the file with a user as a collaborator simply means
    # adding the user as a collaborator; once added, the collaborator
    # can download, edit, and upload the file.
    user_email = sharing.collaborator_id
    filename = sharing.filename
    access_level = sharing.access_level
    try:
        file_id = get_file_id_by_filename(filename, db)
        collaborator_id = get_email_id(user_email, db).id

        file_lock_check = LockFile(file_dir + '/' + filename)

        if file_lock_check.is_locked():
            if is_owner(collaborator_id, file_id, db):
                file_lock_check.release()
            else:
                return None

        shared_status = Collaborators(collaborator_id=collaborator_id,
                                      file_id=file_id,
                                      access_level=access_level,
                                      access_date=dt.datetime.now())
        db.add(shared_status)
        db.commit()
        db.refresh(shared_status)
        return True
    except:
        return False
Example 4
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum, sensor_ht_dewpt_c, sensor):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)    # wait up to 60 seconds
        except:
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()

    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)

    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4:d}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))
            logging.debug("[Write Sensor Log] Data appended to %s", sensor_ht_log_file_tmp)
    except:
        logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_ht_log_file_tmp)

    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
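The acquire-or-break loop above recurs throughout these examples, so it is worth distilling. A hedged sketch of the pattern (the 60-second default mirrors the snippet; the helper name acquire_or_break is made up):

from lockfile import LockFile

def acquire_or_break(path, timeout=60):
    """Acquire the lock at `path`, breaking a presumed-stale lock on timeout."""
    lock = LockFile(path)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=timeout)  # wait up to `timeout` seconds
        except Exception:
            # Assume the previous holder died and left the lock behind.
            lock.break_lock()
            lock.acquire()
    return lock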
Example 5
def writeResult(result):
    lock = LockFile(resultPath)
    # Hold a single lock across both the read and the append so the line
    # count cannot change between the two operations.
    with lock:
        with open(resultPath, "r") as f:
            lines = f.readlines()
        resultNum = len(lines)
        with open(resultPath, "a+") as f:
            prefix = "" if resultNum == 0 else "\n"
            if result is True:
                f.write(prefix + "True")
            elif result in ("Alice", "Eve", "Others"):
                f.write(prefix + result)
            else:
                f.write(prefix + "False")
Example 6
 def release(self):
     """
     Method used to release a lock using the lockfile module.
     """
     lock = LockFile(self.lockfile)
     if lock.i_am_locking():
         lock.release()
Example 7
def write_relay_log(relayNumber, relaySeconds):
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)    # wait up to 60 seconds
            except:
                logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Relay Log] Gained lock: %s", lock.path)
        relay = [0] * 9
        if 1 <= relayNumber <= 8:
            relay[relayNumber] = relaySeconds
        try:
            with open(relay_log_file_tmp, "ab") as relaylog:
                relaylog.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"), 
                    relay[1], relay[2], relay[3], relay[4],
                    relay[5], relay[6], relay[7], relay[8]))
        except:
            logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)
        logging.info("[Write Relay Log] Removing lock: %s", lock.path)
        lock.release()
Example 8
        def addScanResult(self, scanResult, ADD_MODE=NEW_SCAN_RESULT):
                lock = LockFile(self.m_TokenFileName)

                # TODO: handle the case where the file cannot be unlocked in time
                lock.acquire(SECONDS_WAIT_FOR_UNLOCK)

                f = open(self.m_TokenFileName, 'r+')
                listScanResult = self.loadScanResults(f)

                idToken = 0

                if ADD_MODE == TO_EXIST_SCAN_RESULT:
                        listScanResult.setScanResultByIdToken(scanResult)
                else:
                        idToken = listScanResult.addScanResult(scanResult)

                f.seek(0)
                f.write(listScanResult.toJSON())
                f.close()
                lock.release()

                return idToken
Example 9
def write_sensor_log():
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)    # wait up to 60 seconds
            except:
                logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Sensor Log] Gained lock: %s", lock.path)
        try:
            with open(sensor_log_file_tmp, "ab") as sensorlog:
                sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"), 
                    tempc, humidity, dewpointc))
                logging.info("[Write Sensor Log] Data appended to %s", sensor_log_file_tmp)
        except:
            logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_log_file_tmp)
        logging.info("[Write Sensor Log] Removing lock: %s", lock.path)
        lock.release()
Example 10
def write_relay_log(relayNumber, relaySeconds, sensor, gpio):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(relay_log_lock_path)

    while not lock.i_am_locking():
        try:
            logging.debug("[Write Relay Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)    # wait up to 60 seconds
        except:
            logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()

    logging.debug("[Write Relay Log] Gained lock: %s", lock.path)

    try:
        with open(relay_log_file_tmp, "ab") as relaylog:
            relaylog.write('{0} {1:d} {2:d} {3:d} {4:.2f}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor, relayNumber, gpio, relaySeconds))

    except:
        logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)

    logging.debug("[Write Relay Log] Removing lock: %s", lock.path)
    lock.release()
Example 11
def store(email,nickname,number,rate,strs,regressions):
    # User-entered data hits the filesystem here.  
    if not validate_email(email):
        return

    newcontrib = [ bleach.clean(email), 
                   bleach.clean(nickname), 
                   bleach.clean(number), 
                   bleach.clean(rate),
                   bleach.clean(strs),
                   bleach.clean(regressions)]

    lock = LockFile("/var/local/bz-triage/contributors.cfg")
    lock.acquire()
    
    try:
        with open("/var/local/bz-triage/contributors.cfg") as infile:
            contributors = json.load(infile)
    except (IOError, ValueError):
        logging.info("Failed to open the file...")
        contributors = list()

    # Filter instead of removing while iterating, which would skip entries.
    contributors = [existing for existing in contributors
                    if existing[0] != newcontrib[0]]
    contributors.append( newcontrib )

    with open("/var/local/bz-triage/contributors.cfg", 'w') as outfile:
        json.dump(contributors, outfile)
    lock.release()
Example 12
 def check_plugin_options(self):
     '''
     Checks uuid and container name in options, sets some variables
     '''
     if 'name' not in self.options or 'uuid' not in self.options:
         return False
     if 'lockfile' in self.options:
         self.lock = LockFile(self.options['lockfile'])
         self.lock.acquire()
     if 'blocker' in self.options:
         self.blocker = self.options['blocker']
     if not os.path.exists(self.mount_basedir):
         os.mkdir(self.mount_basedir)
     if 'restore' in self.options:
         self.restore = self.options['restore']
     self.job_mount_point = os.path.join(self.mount_basedir,
                                         self.options['uuid'])
     self.config_path = os.path.join("/vz/private", self.options['uuid'])
     self.disk_descriptor = os.path.join(self.config_path, "root.hdd",
                                         "DiskDescriptor.xml")
     self.fs_path = os.path.join("/vz/root", self.options['uuid'])
     if 'excluded_backup_paths' in self.options:
         relative_excludes = self.options['excluded_backup_paths'].replace(
             "'", "").split(",")
         excludes = []
         for relative_exclude in relative_excludes:
             excludes.append(
                 os.path.join(self.job_mount_point,
                              relative_exclude.lstrip('/')))
         self.excluded_backup_paths = excludes
     return True
Example 13
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    if LockFile is DummyLock:
        cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')

    base_dir = config.cache.makedir(cache_key)

    lockfile = join(six.text_type(base_dir), 'lock')
    cache_dir = join(six.text_type(base_dir), 'cache')

    lock = LockFile(lockfile)
    lock.acquire(timeout=timeout)
    try:
        # Clear cache dir contents if it was generated with different
        # asv version
        tag_fn = join(six.text_type(base_dir), 'tag.json')
        tag_content = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            try:
                if util.load_json(tag_fn) != tag_content:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)

        yield cache_dir

        util.write_json(tag_fn, tag_content)
    finally:
        lock.release()
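Because locked_cache_dir yields, it is presumably decorated with contextlib.contextmanager (the decorator is not visible in the snippet). A hedged usage sketch, where config stands in for the pytest config object and build_into is a hypothetical consumer:

with locked_cache_dir(config, 'example-cache-key', timeout=900) as cache_dir:
    # The directory is cleared and recreated under the lock, so concurrent
    # xdist workers never observe a half-built cache.
    build_into(cache_dir)  # hypothetical consumer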
Example 14
 def setup_lock(self):
     self.execution_timer = timeit.default_timer()
     try:
         self.lock = LockFile(self.lock_file)
         while not self.lock.i_am_locking():
             try:
                 self.logger.debug(
                     "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}"
                     .format(self.i2c_address, self.lock.path))
                 self.lock.acquire(timeout=60)  # wait up to 60 seconds
             except:
                 self.logger.warning(
                     "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}"
                     .format(self.i2c_address, self.lock.path))
                 self.lock.break_lock()
                 self.lock.acquire()
         self.logger.debug(
             "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".
             format(self.i2c_address, self.lock.path))
         self.logger.debug(
             "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                 self.i2c_address,
                 (timeit.default_timer() - self.execution_timer) * 1000))
         return 1, "Success"
     except Exception as msg:
         return 0, "Analog->Digital Converter Fail: {}".format(msg)
Example 15
class Repo:
    """
    Class to deal with the metadata surrounding a Git repository
    """
    def __init__(self, parent, repo_url):
        self.parent = parent
        self.url = repo_url
        self.folder_name = os.path.splitext(os.path.basename(repo_url))[0]
        self.containing_folder = os.path.join(parent.repo_folder, self.folder_name)
        if not os.path.exists(self.containing_folder):
            os.makedirs(self.containing_folder)
        self.path = os.path.join(self.containing_folder, 'repo')
        self.lockfile_path = os.path.join(self.containing_folder, 'lock')
        self.lock = LockFile(self.lockfile_path)
        self.json_path = os.path.join(self.containing_folder, 'metadata.json')
        self.data = {}
        if os.path.exists(self.json_path):
            with open(self.json_path) as json_file:
                self.data = json.load(json_file)
        self.__git = None

    def __enter__(self):
        """
        Update context
        """
        self.lock.acquire(timeout=0)
        logger.info('Git: Updating %s', self.url)
        if not os.path.exists(self.path):
            logger.debug('Cloning %s', self.url)
            git.Git(self.containing_folder).clone(self.url, self.path)
        else:
            try:
                repo = self.git(is_updater=True)
                logger.debug('Pulling %s', self.url)
                repo.git.pull()
            except Exception as e:
                logger.debug('Re-Cloning %s because %s', self.url, str(e))
                shutil.rmtree(self.path)
                git.Git(self.containing_folder).clone(self.url, self.path)
        return self

    def __exit__(self, type, value, traceback):
        # Save the updated time
        self.data['last_updated'] = str(datetime.datetime.utcnow())
        self.save()
        logger.info('Git: Update completed for %s', self.url)
        self.lock.break_lock()

    def save(self):
        with open(self.json_path, 'w') as f:
            json.dump(self.data, f)

    def git(self, is_updater=False):
        if self.lock.is_locked() and (not self.parent.is_updater and not is_updater):
            raise AlreadyLocked('This repository is being updated, if you can see this message delete {}'.format(self.lockfile_path))
        else:
            if self.__git is None or is_updater:
                self.__git = git.Repo(self.path)
            return self.__git
Example 16
def file_lock(filename):
    try:
        lock = LockFile(filename)
        lock.acquire(timeout=1)
    except:
        print 'lock failed'
        lock = None
    return lock
Example 17
    def _lock_state_file(self):
        self._lock = LockFile(self.path)

        if (self._lock.is_locked()
                and (time() - getmtime(self._lock.lock_file)) > 10):
            self._lock.break_lock()

        self._lock.acquire()
Example 18
def dumpTasks(filename, tasklist):
	lock = LockFile(LOCK_FILE)
	lock.acquire()
	with open(filename, 'w') as f:
		f.write("[\n  ")
		f.write(",\n  ".join(json.dumps(task) for task in tasklist))
		f.write("\n]\n")
	lock.release()
Example 19
def get_auth_token(use_client_file=True, **kwargs):
    ctx.logger.info("In auth.get_auth_token")
    if use_client_file:
        if constants.AUTH_TOKEN_VALUE in ctx.instance.runtime_properties:
            # If you are here , it means that this is during bootstrap
            ctx.logger.info("In auth.get_auth_token returning token from runtime props")
            ctx.logger.info("In auth.get_auth_token token from runtime props is:{}".format(ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]))
            return ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]

        # Check if token file exists on the client's VM. If so, take the value from it and set it in the runtime
        ctx.logger.info("In auth.get_auth_token checking local azure file path {}".format(constants.path_to_local_azure_token_file))
        if os.path.isfile(constants.path_to_local_azure_token_file):
            # If you are here , it means that this is during bootstrap
            ctx.logger.info("{} exists".format(constants.path_to_local_azure_token_file))
            token, token_expires = get_token_from_client_file()
            ctx.logger.info("get_auth_token expiry is {} ".format(token_expires))
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE] = token
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_EXPIRY] = token_expires
            ctx.logger.info("get_auth_token token1 is {} ".format(token))
            return token

    # From here, this is not during bootstrap, which also means that this code runs on the manager's VM.
    try:
        ctx.logger.info("In auth.get_auth_token b4 locking {}".format(constants.path_to_azure_conf))
        lock = LockFile(constants.path_to_azure_conf)
        lock.acquire()
        ctx.logger.info("{} is locked".format(lock.path))
        with open(constants.path_to_azure_conf, 'r') as f:
            json_data = json.load(f)
            token_expires = json_data["token_expires"]
            token = json_data["auth_token"]
            ctx.logger.info("get_auth_token token2 is {} ".format(token))
    except:
        raise NonRecoverableError("Failures while locking or using {}".format(constants.path_to_azure_conf))

    ctx.logger.info("In auth.get_auth_token b4 timestamp")
    timestamp = int(time.time())
    ctx.logger.info("In auth.get_auth_token timestamp is {}".format(timestamp))
    ctx.logger.info("In auth.get_auth_token token_expires1 is {}".format(token_expires))
    token_expires = int(token_expires)
    ctx.logger.info("In auth.get_auth_token token_expires2 is {}".format(token_expires))
    if token_expires-timestamp <= 600 or token_expires == 0 or token is None or token == "":
        ctx.logger.info("In auth.get_auth_token token_expires-timestamp {}".format(token_expires-timestamp))
        endpoints, payload = _get_payload_endpoints()
        token, token_expires = _get_token_value_expiry(endpoints, payload)
        ctx.logger.info("get_auth_token token3 is {} ".format(token))
        ctx.logger.info("In auth.get_auth_token b4 opening {}".format(constants.path_to_azure_conf))
        with open(constants.path_to_azure_conf, 'r+') as f:
            json_data = json.load(f)
            json_data["auth_token"] = token
            json_data["token_expires"] = token_expires
            f.seek(0)
            f.write(json.dumps(json_data))
    lock.release()
    ctx.logger.info("{} is released".format(lock.path))
    ctx.logger.info("get_auth_token token4 is {} ".format(token))
    return token
Example 20
 def sync_folder(self):
     encrypted_folder_lock = LockFile(self.encrypted_folder)
     if encrypted_folder_lock.is_locked():
         self.info("Acquiring the lock of encrypted folder...")
     with encrypted_folder_lock:
         plain_folder_lock = LockFile(self.plain_folder)
         if plain_folder_lock.is_locked():
             self.info("Acquiring the lock of plaintext folder...")
         with plain_folder_lock:
             self._do_sync_folder()
Example 21
    def __init__(self, path):
        try:
            from lockfile import LockFile
        except ImportError:
            from lockfile import FileLock
            # Different naming in older versions of lockfile
            LockFile = FileLock

        self.path = path
        self.lock = LockFile(path)
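The same compatibility shim is often written at module import time. A minimal sketch, assuming only what the snippet itself shows: that older lockfile releases expose FileLock where newer ones expose LockFile:

try:
    from lockfile import LockFile
except ImportError:
    # Older lockfile versions used the name FileLock.
    from lockfile import FileLock as LockFile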
Example 22
    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)

        if (self._lockfile.is_locked()
                and (time() - getmtime(self._lockfile.lock_file)) > 10):
            self._lockfile.break_lock()

        self._lockfile.acquire()
Example 23
    def _download_rpm(self, nvr, arch):
        if nvr is None or arch is None:
            raise ValueError("Invalid option passed to connector")

        filename = '%s.%s.rpm' % (nvr, arch)
        file_path = os.path.split(filename)
        if file_path[0] != '':
            raise ValueError("Nvr can not contain path elements")
        if len(arch.split('/')) != 1 or os.path.split(arch)[0] != '':
            raise ValueError("Arch can not contain path elements")

        rpm_file_path = os.path.join(self._rpm_cache, filename)
        if os.path.exists(rpm_file_path):
            return rpm_file_path

        # Lock on the cached rpm path (file_path above is a (head, tail) tuple).
        lockfile = LockFile(rpm_file_path)
        if lockfile.is_locked():
            # block until the lock is released and then assume other
            # thread was successful
            lockfile.acquire()
            lockfile.release()
            return rpm_file_path

        # acquire the lock and release when done
        lockfile.acquire()
        try:
            info = self.call('getBuild', {'buildInfo': nvr})
            if info is None:
                return {'error': 'No such build (%s)' % filename}

            if not os.path.exists(self._rpm_cache):
                os.mkdir(self._rpm_cache,)

            url = '%s/%s/%s/%s/%s/%s' % (
                self._koji_pkg_url, info['name'], info['version'],
                info['release'], arch, filename)

            url_file = grabber.urlopen(url, text=filename)
            out = os.open(
                rpm_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0666)
            try:
                while 1:
                    buf = url_file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            finally:
                os.close(out)
                url_file.close()
        finally:
            lockfile.release()

        return rpm_file_path
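The "wait, then assume the other worker succeeded" idiom at the top of _download_rpm is a pattern of its own. A hedged sketch with a hypothetical helper name:

from lockfile import LockFile

def wait_if_in_progress(lock_path):
    """Return True if another process held the lock and has now finished,
    meaning the result can be assumed to exist and our work can be skipped."""
    lock = LockFile(lock_path)
    if lock.is_locked():
        lock.acquire()   # block until the current holder releases
        lock.release()   # the lock itself was never needed by us
        return True
    return False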
Example 24
class MCP342x_read(object):
    def __init__(self, logger, address, channel, gain, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.gain = gain
        self.resolution = resolution
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(
            self.i2c_address)

    def setup_lock(self):
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug(
                        "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except:
                    self.logger.warning(
                        "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".
                format(self.i2c_address, self.lock.path))
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                    self.i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        self.lock.release()

    def read(self):
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus,
                          self.i2c_address,
                          channel=self.channel - 1,
                          gain=self.gain,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
Example 25
	def on_post(self, req, resp, id):
		try:
			user = req.context['user']

			# Permission check
			if (not user.is_logged_in()) or (not user.is_org()):
				req.context['result'] = 'Nedostatecna opravneni'
				resp.status = falcon.HTTP_400
				return

			# Check that the task exists
			task = session.query(model.Task).get(id)
			if task is None:
				req.context['result'] = 'Neexistujici uloha'
				resp.status = falcon.HTTP_404
				return

			# Published tasks may only be deployed by admins
			wave = session.query(model.Wave).get(task.wave)
			if (datetime.datetime.utcnow() > wave.time_published) and (not user.is_admin()):
				req.context['result'] = 'Po zverejneni ulohy muze deploy provest pouze administrator'
				resp.status = falcon.HTTP_404
				return

			# Check that the git branch and directory are set in the database
			if (task.git_branch is None) or (task.git_path is None):
				req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
				resp.status = falcon.HTTP_400
				return

			# Check the git lock
			lock = util.lock.git_locked()
			if lock:
				req.context['result'] = 'GIT uzamcen zamkem ' + lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
				resp.status = falcon.HTTP_409
				return

			# The 'deploying' status must be set in this thread
			task.deploy_status = 'deploying'
			session.commit()

			# Acquire before the try so a failed acquire cannot trigger
			# release() in the finally, as in the merge handler above.
			deployLock = LockFile(util.admin.taskDeploy.LOCKFILE)
			deployLock.acquire(60) # Lock timeout is 1 minute
			try:
				deployThread = threading.Thread(target=util.admin.taskDeploy.deploy, args=(task.id, deployLock, scoped_session(_session)), kwargs={})
				deployThread.start()
			finally:
				deployLock.release()

			resp.status = falcon.HTTP_200
		except SQLAlchemyError:
			session.rollback()
			raise
		finally:
			session.close()
Example 26
def restart():  # this really could be health
    lock = None
    try:
        lock = LockFile('json/health.json')
        lock.acquire()
        with open('json/health.json', 'r') as json_file:

            data = json.load(json_file, encoding='utf-8')

            if request.method == 'POST':
                status = request.args.get('status', type=str)

                if status is None:
                    print 'no status given, defaults to true'
                    status = 'true'

                data['restart'] = status

                with open('json/health.json', 'w') as json_file:
                    json_file.write(json.dumps(data))
                    lock.release()
                    return 'restart set to %s' % status
            if request.method == 'GET':
                lock.release()
                return data['restart']

    except IOError:
        if lock is not None:
            lock.release()
        return Messages.inventoryNotFound()
Example 27
    def pre_process(self, params):
        """
        Converts the files

        First pass is to create striped tiff using kakadu if available
        and second pass is to convert to tiled tiff.

        A third file path is used for the lock: if the lock can be
        acquired and the output is not ready, create it.  If the lock
        cannot be acquired, another process is probably working on it.

        TODO: Decide when to declare not possible ?
        """
        # Split the requested filename
        dirname, filename = os.path.split(params["fname"])
        _, ext = os.path.splitext(filename)

        # assert that ext is as expected
        assert ext in [".jp2", ".j2k"]

        output1 = os.path.join(dirname, filename + "_striped.tif")
        output2 = os.path.join(dirname, filename + "_tiled.tif")
        lock_path = os.path.join(dirname, filename + ".lock")

        lock = LockFile(lock_path)
        # logger.info('waiting for lock')
        lock.acquire()
        # If the file is missing then create it
        if not os.path.exists(output2):
            # Make sure the processing lock can be acquired
            logger.info('processing')

            logger.info('# Convert to striped tiff')
            if self.kakadu_dir is None:
                params = ["gdal_translate", params["fname"], output1]
                subprocess.call(params)
            else:
                # Additional LD_LIBRARY_PATH
                environ = os.environ.copy()

                if "LD_LIBRARY_PATH" not in environ:
                    environ["LD_LIBRARY_PATH"] = ""

                environ["LD_LIBRARY_PATH"] = self.kakadu_dir + ":" + environ["LD_LIBRARY_PATH"]
                params = [os.path.join(self.kakadu_dir, "kdu_expand"), "-i", params["fname"], "-o", output1]
                subprocess.call(params, env=environ)

            logger.info('# Convert to tiled tiff')
            params = ["gdal_translate", "-co", "TILED=YES", "-co", "COMPRESS=JPEG", output1, output2]
            subprocess.call(params)

            # Then remove output1
            os.remove(output1)
        lock.release()
        return output2
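Stripped of the gdal/kakadu details, the method above follows a simple lock-guarded create-if-missing protocol. A minimal sketch under that reading, with hypothetical names:

import os
from lockfile import LockFile

def ensure_output(output_path, lock_path, build):
    """Run build(output_path) at most once, even with concurrent callers."""
    lock = LockFile(lock_path)
    lock.acquire()  # whoever holds the lock does (or already did) the work
    try:
        if not os.path.exists(output_path):
            build(output_path)
    finally:
        lock.release()
    return output_path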
Example 28
def main():

    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()

    data = {}
    if os.path.exists(SMART_FILE):
        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                pass

    device = os.environ.get('SMARTD_DEVICE')
    if device not in data:
        data[device] = []

    message = os.environ.get('SMARTD_MESSAGE')
    if message not in data[device]:
        data[device].append(message)

    with open(SMART_FILE, 'wb') as f:
        f.write(pickle.dumps(data))

    lock.release()
Example 29
def loadTasks(filename):
	lock = LockFile(LOCK_FILE)
	lock.acquire()
	with open(filename) as f:
		content = f.read()
	if len(content.strip()) == 0:
		ret = []
	else:
		ret = json.loads(content)
	lock.release()
	return ret
Example 30
    def _lock_dbindex(self):
        self._lockfile = LockFile(self.cache_dir)
        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()

        try:
            self._lockfile.acquire()
        except LockFailed:
            return False

        return True
Example 31
    def build_execute_kitlist(self):
        try:
            strEnvironmentFolderPath = os.path.abspath(
                __file__ + "/../../../") + "/02_Manager_Tier/EnviromentFile/"
            self.strAndroidDeviceJsonFilePath = strEnvironmentFolderPath + '/android_device_list.json'
            self.lock = LockFile(self.strAndroidDeviceJsonFilePath)
            deviceUtils.create_android_device_json()
            time.sleep(10)
            deviceUtils.install_app_android_device(strAndroidAppFilePath)
            utils.setAttribute_KitBatch('batch_execution', 'status', 'YES')
            oBKitJsonDict = self.get_kit_batch_json()
            self.current_batch_id = oBKitJsonDict["kit_batch"][
                'current_batch_id']
            oKitList = oBKitJsonDict["kit_batch"]['list_of_batches'][
                self.current_batch_id]['list_of_kits']
            if len(oKitList.keys()) > 0:
                utils.setAttribute_KitBatch(
                    'batch_execution', 'current_batch_result_folder',
                    self.current_batch_id + '_' + self.getTimeStamp(True))
                #Main Summary result
                self.Batch_Execution_Summary_Initialize()
                oKitPriority = {}
                #List Kits based on priority
                for current_kit_id in oKitList:
                    oKitPriority[int(
                        oKitList[current_kit_id]['priority'])] = current_kit_id
                #Trigger the Execution for the kits
                for oPKey in sorted(oKitPriority.keys()):
                    #self.trigger_parallel_kit_execution(oKitPriority[oPKey])
                    # Start the Individual Kit parallel execution

                    parallelExecThread = threading.Thread(
                        target=self.trigger_parallel_kit_execution,
                        args=(oKitPriority[oPKey], ))
                    parallelExecThread.daemon = True  # This kills the thread when main program exits
                    parallelExecThread.start()
                    parallelExecThread.name = oKitPriority[oPKey]
                    jobs.append(parallelExecThread)
                    time.sleep(50)
                for oJob in jobs:
                    oJob.join()
                #Footer for Batch summary report
                self.Batch_Execution_Summary_Footer()

            else:
                print('No kits in the Selected batch')
            utils.setAttribute_KitBatch('batch_execution', 'status', 'NO')
        except:
            print(
                'Batch Execution: Exception in build_execute_kitlist Method\n {0}'
                .format(traceback.format_exc().replace('File', '$~File')))
            utils.setAttribute_KitBatch('batch_execution', 'status', 'NO')
Example 32
	def on_post(self, req, resp, id):
		pullLock = None
		try:
			user = req.context['user']

			if (not user.is_logged_in()) or (not user.is_org()):
				resp.status = falcon.HTTP_400
				return

			# Check the git lock
			lock = util.lock.git_locked()
			if lock:
				req.context['result'] = 'GIT uzamcen zamkem ' + lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
				resp.status = falcon.HTTP_409
				return

			pullLock = LockFile(util.admin.waveDiff.LOCKFILE)
			pullLock.acquire(60) # Lock timeout is 1 minute

			# Fetch
			repo = git.Repo(util.git.GIT_SEMINAR_PATH)
			repo.remotes.origin.fetch()

			# Tasks in the wave
			tasks = session.query(model.Task).\
				filter(model.Task.wave == id).all()

			# Diff each task's directory at task.git_commit against HEAD
			for task in tasks:
				if (not task.git_branch) or (not task.git_path) or (not task.git_commit):
					task.deploy_status = 'default'
					continue

				# Checkout && pull the branch the task lives on
				repo.git.checkout(task.git_branch)
				repo.remotes.origin.pull()

				# Check that the task directory exists
				if os.path.isdir(util.git.GIT_SEMINAR_PATH+task.git_path):
					hcommit = repo.head.commit
					diff = hcommit.diff(task.git_commit, paths=[task.git_path])
					if len(diff) > 0: task.deploy_status = 'diff'
				else:
					task.deploy_status = 'default'

			session.commit()
		except SQLAlchemyError:
			session.rollback()
			req.context['result'] = 'Nastala vyjimka backendu'
			raise
		finally:
			# pullLock may still be None if an early return fired before
			# the lock was acquired.
			if pullLock:
				pullLock.release()
			session.close()
Example 33
    def pre_process(self, params):
        """
        Converts the files

        First pass is to create striped tiff using kakadu if available
        and second pass is to convert to tiled tiff.

        A third file path is used for the lock: if the lock can be
        acquired and the output is not ready, create it.  If the lock
        cannot be acquired, another process is probably working on it.

        TODO: Decide when to declare not possible ?
        """
        # Split the requested filename
        dirname, filename = os.path.split(params["fname"])
        name, ext = os.path.splitext(filename)

        # assert that ext is as expected
        assert ext in [".tif", ".tiff"]

        # try reading from the openslidereader first
        try:
            tempreader = OpenslideReader()
            tempreader.set_input_params(params)
            logger.info("No preprocess needed")
            return params["fname"]

        except Exception as e:
#             print str(type(e))
            pass
            # continue

        output1 = os.path.join(dirname, name + "_tiled.tif")
        lock_path = os.path.join(dirname, filename + ".lock")

        lock = LockFile(lock_path)
        # logger.info('waiting for lock')
        lock.acquire()
        # If the file is missing then create it
        if not os.path.exists(output1):
            # Make sure the processing lock can be acquired
            logger.info('processing')
            logger.info('# Convert to tiled tiff')

            params = ["vips", "tiffsave", params["fname"], output1,
                      "--compression=jpeg", "--tile", "--tile-width=256",
                      "--tile-height=256", "--bigtiff"]

            subprocess.call(params)

        lock.release()
        return output1
Example 34
    def main(username, password):
        from . import logging_utils
        logging_utils.configure_logging()
        from .navigation import Leifur
        from .config import get_config, set_config, get_config_from_user
        from .connectivity import check_internet_connection
        from .update_checker import check_for_updates

        lock = LockFile('/tmp/spoppy')

        try:
            try:
                # Try for 1s to acquire the lock
                lock.acquire(1)
            except LockTimeout:
                click.echo('Could not acquire lock, is spoppy running?')
                click.echo(
                    'If you\'re sure that spoppy is not running, '
                    'try removing the lock file %s' % lock.lock_file
                )
                click.echo(
                    'You can try removing the lock file by responding [rm]. '
                    'spoppy will exit on all other inputs'
                )
                try:
                    response = raw_input('')
                except NameError:
                    response = input('')
                if response == 'rm':
                    lock.break_lock()
                else:
                    raise TypeError('Could not get lock')
        except TypeError:
            pass
        else:
            check_internet_connection()
            # Check for updates
            check_for_updates(click, get_version(), lock)

            if username and password:
                set_config(username, password)
            else:
                username, password = get_config()
            if not (username and password):
                username, password = get_config_from_user()

            navigator = None
            try:
                navigator = Leifur(username, password)
                navigator.start()
            finally:
                if navigator:
                    navigator.shutdown()
                logger.debug('Finally, bye!')
        finally:
            if lock.i_am_locking():
                lock.release()
Example 35
    def list_stale_environment_clones(self):
        """ Lists environment clones left behind on disk and no longer used

        Stale clones could be left behind because of an error during a previous run.

        An environment is defined as stale if it fits the name of a managed environment
        with a suffix, and is not pointed at by any symlinks

        :return: list(str)
        """
        links = {}
        candidates = []
        stale_clones = []

        items = os.listdir(self.environment_dir)
        for item in items:
            # Ignore hidden files
            if item.startswith('.'):
                continue

            # Explicitly ignore the master repo name
            if item == self.master_repo_name:
                continue

            # Ignore anything matching the blacklist pattern
            if self.blacklist.match(item):
                self.logger.debug(
                    "Ignoring blacklisted environment {0}".format(item))
                continue

            path = os.path.join(self.environment_dir, item)
            if os.path.islink(path):
                links[os.readlink(path)] = path
            elif os.path.isdir(path):
                candidates.append(path)

        # Look for candidate environments which aren't the target of any symlinks
        for candidate in candidates:
            if candidate not in links:
                environment_path = self.environment_repo_path(
                    self.identify_environment_name_from_clone_name(
                        self.identify_environment_name_from_path(candidate)))
                lock = LockFile(environment_path)
                if lock.is_locked():
                    # Ignore locked environments, might be in use
                    continue

                self.logger.debug(
                    "Stale environment detected: {0}".format(candidate))
                stale_clones.append(candidate)

        return stale_clones
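The "not pointed at by any symlinks" test above can be phrased as a standalone predicate. A sketch with a hypothetical helper name:

import os

def is_symlink_target(path, directory):
    """True if any symlink directly inside `directory` points at `path`."""
    return any(
        os.path.islink(os.path.join(directory, name)) and
        os.readlink(os.path.join(directory, name)) == path
        for name in os.listdir(directory)
    )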
Example 36
def append_line_to_mycodo_log(log_file_path, log_lock_path, log_line):
    """
    Appends given line to log file.

    :return:
    :rtype:

    :param log_file_path: Path to the Log File
    :type log_file_path: str
    :param log_lock_path: Path to the Lock File
    :type log_lock_path: str
    :param log_line: String to write to the Log File
    :type log_line: str
    """
    lock = LockFile(log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: {}".format(lock.path))
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except:  # TODO Needs better catch statement
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: {}".format(lock.path))
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: {}".format(lock.path))
    try:
        with open(log_file_path, "ab") as sensorlog:
            sensorlog.write(log_line + "\n")

            # Temperature:
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {}'.format(now(), sensor_t_read_temp_c[sensor], sensor))

            # Temperature/Humidity:
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {:.1f} {:.1f} {}'.format(now(), sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))

            # CO2
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {} {}'.format(now(), sensor_co2_read_co2[sensor], sensor))

            # Pressure
            # sensorlog.write('{"%Y/%m/%d-%H:%M:%S"} {:.1f} {} {:.1f} {}'.format(now(), sensor_press_read_temp_c[sensor], sensor_press_read_press[sensor], sensor_press_read_alt[sensor], sensor))

            # Relay
            # relaylog.write('{"%Y/%m/%d-%H:%M:%S"} {} {} {} {:.2f}'.format(now(), sensor, relayNumber, gpio, relaySeconds))

            logging.debug("[Write Sensor Log] Data appended to {}".format(
                          log_file_path))
    except:  # TODO Needs better catch statement
        logging.warning("[Write Sensor Log] Unable to append data to %s",
                        log_file_path)
    logging.debug("[Write Sensor Log] Removing lock: {}".format(lock.path))
    lock.release()
Example 37
    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)

        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()

        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.HomeDirPermissionsError(dirname(self.path))
Example 38
    def __enter__(self):
        self._lock = LockFile(self.QUEUE_FILE)
        while not self._lock.i_am_locking():
            try:
                self._lock.acquire(timeout=330)
            except LockTimeout:
                self._lock.break_lock()

        if not os.path.exists(self.QUEUE_FILE):
            open(self.QUEUE_FILE, 'a').close()

        self._get_queue()
        return self
Example 39
    def __init__(self,
                 interface,
                 device_loc=None,
                 baud_rate=None,
                 i2c_address=None,
                 i2c_bus=None):
        super(MHZ16Sensor, self).__init__()
        self.k30_lock_file = None
        self._co2 = 0
        self.interface = interface

        if self.interface == 'UART':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(
                    dev=device_loc.replace('/', '')))
            # Check if device is valid
            self.serial_device = is_device(device_loc)
            if self.serial_device:
                try:
                    self.k30_lock_file = "/var/lock/sen-mhz16-{}".format(
                        device_loc.replace('/', ''))
                    self.lock = LockFile(self.k30_lock_file)
                    self.ser = serial.Serial(self.serial_device,
                                             baudrate=baud_rate,
                                             timeout=1)
                except serial.SerialException:
                    self.logger.exception('Opening serial')
            else:
                self.logger.error(
                    'Could not open "{dev}". '
                    'Check the device location is correct.'.format(
                        dev=device_loc))

        elif self.interface == 'I2C':
            self.logger = logging.getLogger(
                "mycodo.sensors.mhz16.{dev}".format(dev=i2c_address))
            self.cmd_measure = [
                0xFF, 0x01, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63
            ]
            self.IOCONTROL = 0X0E << 3
            self.FCR = 0X02 << 3
            self.LCR = 0X03 << 3
            self.DLL = 0x00 << 3
            self.DLH = 0X01 << 3
            self.THR = 0X00 << 3
            self.RHR = 0x00 << 3
            self.TXLVL = 0X08 << 3
            self.RXLVL = 0X09 << 3
            self.i2c_address = i2c_address
            self.i2c = smbus.SMBus(i2c_bus)
            self.begin()
Example 40
def relay_command(command, rid):
    if (rid and rid != -1 and not stateless_command(command)):
        state = relay_status(rid)
    else:
        state = -1

    if (command == 'g'):
        if (state == -1):
            print "Will return an invalid status"
        return '1' if state else '0'

    lock = LockFile("/tmp/relay", timeout=5)
    try:
        with lock:
            log = open(log_file, "a")
            log.write(">" + str(command) + " " + str(rid) + "\n")
            ser = serial.Serial('/dev/ttyUSB0', 19200, timeout=1)
            ser.flush()
            ser.write(command)
            if (rid >= 0):
                ser.write(chr((int(rid) >> 8) & 0xff))
                ser.write(chr((int(rid)) & 0xff))
                log.close()
            else:
                response = ser.readline()
                log.write("<" + response + "\n")
                log.close()
                return response
    except LockTimeout:
        lock.break_lock()
    if state != -1 and (int(rid) in http_url):
        url, consumption = http_url[int(rid)]
    else:
        url = False

    if (url != False):
        try:
            if (command == 'A' and not state):
                urllib2.urlopen(url + str(float(0)))
                urllib2.urlopen(url + str(float(consumption)))
            elif (command == 'E' and state):
                urllib2.urlopen(url + str(float(consumption)))
                urllib2.urlopen(url + str(float(0)))
        except Exception:
            pass

    if (rid != -1):
        update_status(rid, command)
    return ''
Example 41
    def open(self, filename):
        filepath = os.path.join(self._get_base_path(), filename)

        # print("Opening file database {0}".format(filepath))

        self.lock = LockFile(filepath)
        while not self.lock.i_am_locking():
            try:
                self.lock.acquire(timeout=10)  # wait up to 10 seconds
            except LockTimeout:
                self.lock.break_lock()
                self.lock.acquire()

        # Any exception here (e.g., DBAccessError) should be handled by the caller
        self.db = shelve.open(filepath)
Example 42
        def getScanResult(self, idToken):
                lock = LockFile(self.m_TokenFileName)
                # for now, this will do...
                lock.acquire(SECONDS_WAIT_FOR_UNLOCK)
                f = open(self.m_TokenFileName, 'r+')
                listScanResult = self.loadScanResults(f)
                f.close()
                lock.release()

                scanResult = listScanResult.getScanResultByIdToken(idToken)

                return scanResult
Example 43
    def check_sync(self):
        try:
            with LockFile(VMWARELOGIN_FAILS):
                with open(VMWARELOGIN_FAILS, "rb") as f:
                    fails = pickle.load(f)
        except Exception:
            return

        alerts = []
        for oid, errmsg in list(fails.items()):
            try:
                vmware = self.middleware.call_sync("datastore.query",
                                                   "storage.vmwareplugin",
                                                   [["id", "=", oid]],
                                                   {"get": True})
            except IndexError:
                continue

            alerts.append(
                Alert(VMWareLoginFailedAlertClass, {
                    "hostname": vmware["hostname"],
                    "error": errmsg,
                }))

        return alerts
Example 44
 def consume(self, msg):
     print "****** STARTING CONSUME"
     # ignore the message, we do what we want
     lock = LockFile(app.config['RUNNER_LOCKFILE'])
     with lock:
         # get list of open jobs
         while True:
             jobs = Job.query.filter_by(status=dgrepm.STATUS_FREE)
             if jobs.count() == 0:
                 break
             for job in jobs:
                 # run query on jobs
                 dq = DataQuery.from_database(job)
                 job.set_status(dgrepm.STATUS_OPEN)
                 try:
                     job.filename = dq.run_query(
                         'datagrepper_{0}'.format(job.id))
                 except:
                     job.set_status(dgrepm.STATUS_FAILED)
                 else:
                     job.set_status(dgrepm.STATUS_DONE)
         # get list of completed jobs to be deleted
         jobs = Job.query.filter(
             Job.status == dgrepm.STATUS_DONE,
             Job.complete_time < (datetime.now() - app.config['JOB_EXPIRY'])
         )
         for job in jobs:
             os.remove(os.path.join(app.config['JOB_OUTPUT_DIR'],
                                    job.filename))
             job.set_status(dgrepm.STATUS_DELETED)
     print "****** FINISHING CONSUME"
Example 45
    def run(self, packs, abs_repo_base, verifyssl=True, force=False):
        result = {}

        for pack in packs:
            pack_url, pack_version = self._get_repo_url(pack)

            temp_dir = hashlib.md5(pack_url).hexdigest()

            with LockFile('/tmp/%s' % (temp_dir)):
                try:
                    user_home = os.path.expanduser('~')
                    abs_local_path = os.path.join(user_home, temp_dir)
                    self._clone_repo(temp_dir=abs_local_path,
                                     repo_url=pack_url,
                                     verifyssl=verifyssl,
                                     ref=pack_version)

                    pack_ref = self._get_pack_ref(abs_local_path)

                    # Verify that the pack version if compatible with current StackStorm version
                    if not force:
                        self._verify_pack_version(pack_dir=abs_local_path)

                    result[pack_ref] = self._move_pack(abs_repo_base, pack_ref,
                                                       abs_local_path)
                finally:
                    self._cleanup_repo(abs_local_path)

        return self._validate_result(result=result, repo_url=pack_url)
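Deriving the lock name from an md5 of the repo URL gives each pack its own lock file, so concurrent installs of the same pack serialize while unrelated packs proceed in parallel. A sketch (the /tmp prefix comes from the snippet; the URL is hypothetical):

import hashlib

repo_url = 'https://example.com/some-pack.git'  # hypothetical
lock_path = '/tmp/%s' % hashlib.md5(repo_url.encode()).hexdigest()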
Example 46
    def test_run_pack_lock_is_already_acquired(self):
        action = self.get_action_instance()
        temp_dir = hashlib.md5(
            PACK_INDEX['test']['repo_url'].encode()).hexdigest()

        original_acquire = LockFile.acquire

        def mock_acquire(self, timeout=None):
            original_acquire(self, timeout=0.1)

        LockFile.acquire = mock_acquire

        try:
            lock_file = LockFile('/tmp/%s' % (temp_dir))

            # Acquire a lock (file) so acquire inside download will fail
            with open(lock_file.lock_file, 'w') as fp:
                fp.write('')

            expected_msg = 'Timeout waiting to acquire lock for'
            self.assertRaisesRegexp(LockTimeout,
                                    expected_msg,
                                    action.run,
                                    packs=['test'],
                                    abs_repo_base=self.repo_base)
        finally:
            os.unlink(lock_file.lock_file)
            LockFile.acquire = original_acquire
Esempio n. 47
    def test_run_pack_lock_is_already_acquired_force_flag(self):
        # Lock is already acquired but force is true so it should be deleted and released
        action = self.get_action_instance()
        temp_dir = hashlib.md5(
            PACK_INDEX['test']['repo_url'].encode()).hexdigest()

        original_acquire = LockFile.acquire

        def mock_acquire(self, timeout=None):
            original_acquire(self, timeout=0.1)

        LockFile.acquire = mock_acquire

        try:
            lock_file = LockFile('/tmp/%s' % (temp_dir))

            # Acquire a lock (file) so acquire inside download will fail
            with open(lock_file.lock_file, 'w') as fp:
                fp.write('')

            result = action.run(packs=['test'],
                                abs_repo_base=self.repo_base,
                                force=True)
        finally:
            LockFile.acquire = original_acquire

        self.assertEqual(result, {'test': 'Success.'})
Esempio n. 48
def check_q():
    now = time.time()
    with LockFile(MAP_FILE):
        if time.time() - now > 60:
            logging.info("Took too long to acquire lock; letting go...")
            return
        actually_check_q()
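
# A sketch of the same guard (not from the original snippet) expressed
# with LockFile.acquire(timeout=...) and LockTimeout instead of timing a
# blocking acquire; MAP_FILE and actually_check_q are assumed from above.
from lockfile import LockFile, LockTimeout

def check_q_with_timeout():
    lock = LockFile(MAP_FILE)
    try:
        lock.acquire(timeout=60)  # raises LockTimeout after 60 seconds
    except LockTimeout:
        logging.info("Took too long to acquire lock; letting go...")
        return
    try:
        actually_check_q()
    finally:
        lock.release()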
Esempio n. 49
def pickleNodeVMDictionary(dictionary):
    lock = LockFile("/var/lib/virtdc/framework/host_vm_dict.pkl")
    with lock:
        #print "I locked", lock.path
        # Pickle requires a binary file handle, hence 'wb'
        with open('/var/lib/virtdc/framework/host_vm_dict.pkl',
                  'wb') as host_vm_pickle_out:
            pickle.dump(dictionary, host_vm_pickle_out)
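
# A minimal companion reader (hypothetical, not part of the original
# snippet), loading the dictionary back under the same lock; pickle needs
# a binary handle, hence 'rb'.
def unpickleNodeVMDictionary():
    lock = LockFile("/var/lib/virtdc/framework/host_vm_dict.pkl")
    with lock:
        with open('/var/lib/virtdc/framework/host_vm_dict.pkl',
                  'rb') as host_vm_pickle_in:
            return pickle.load(host_vm_pickle_in)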
Esempio n. 50
def load_environment_file(envfile, base_dir, key_length=64):
    config = None
    lock = LockFile(base_dir)

    with lock:
        if not os.path.exists(envfile):
            # Create the file with a fresh secret key if it doesn't exist
            config = ConfigParser()
            config.add_section('django')
            config['django']['secret_key'] = get_random_string(key_length, VALID_KEY_CHARS)

            with open(envfile, 'w') as configfile:
                config.write(configfile)

        if not config:
            config = ConfigParser()
            config.read_file(open(envfile))

        if not config.has_section('django'):
            raise ImproperlyConfigured('Missing `django` section in the environment file.')

        if not config.get('django', 'secret_key', fallback=None):
            raise ImproperlyConfigured('Missing `secret_key` in django section in the environment file.')

        # Register all keys as environment variables
        for key, value in config.items('django'):
            envname = 'DJANGO_%s' % key.upper()  # Prefix to avoid collisions with existing env variables
            if envname not in os.environ:  # Don't replace existing defined variables
                os.environ[envname] = value
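
# An illustrative call site (hypothetical paths, not from the original
# snippet), e.g. near the top of a Django settings module; afterwards the
# generated secret key is available through os.environ.
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
load_environment_file(os.path.join(BASE_DIR, 'environment.ini'), BASE_DIR)
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']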
Esempio n. 51
    def setup_lock(self, i2c_address, i2c_bus, lockfile):
        self.execution_timer = timeit.default_timer()
        try:
            self.lock[lockfile] = LockFile(lockfile)
            while not self.lock[lockfile].i_am_locking():
                try:
                    self.logger.debug("[Locking bus-{} 0x{:02X}] Acquiring "
                                      "Lock: {}".format(
                                          i2c_bus, i2c_address,
                                          self.lock[lockfile].path))
                    self.lock[lockfile].acquire(
                        timeout=60)  # wait up to 60 seconds
                except Exception:
                    self.logger.exception(
                        "[Locking bus-{} 0x{:02X}] Waited 60 "
                        "seconds. Breaking lock to acquire "
                        "{}".format(i2c_bus, i2c_address,
                                    self.lock[lockfile].path))
                    self.lock[lockfile].break_lock()
                    self.lock[lockfile].acquire()
            self.logger.debug(
                "[Locking bus-{} 0x{:02X}] Acquired Lock: {}".format(
                    i2c_bus, i2c_address, self.lock[lockfile].path))
            self.logger.debug(
                "[Locking bus-{} 0x{:02X}] Executed in {:.1f} ms".format(
                    i2c_bus, i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Multiplexer Fail: {}".format(msg)
Esempio n. 52
    def __init__(self, parent, repo_url):
        self.parent = parent
        self.url = repo_url
        self.folder_name = os.path.splitext(os.path.basename(repo_url))[0]
        self.containing_folder = os.path.join(parent.repo_folder, self.folder_name)
        if not os.path.exists(self.containing_folder):
            os.makedirs(self.containing_folder)
        self.path = os.path.join(self.containing_folder, 'repo')
        self.lockfile_path = os.path.join(self.containing_folder, 'lock')
        self.lock = LockFile(self.lockfile_path)
        self.json_path = os.path.join(self.containing_folder, 'metadata.json')
        self.data = {}
        if os.path.exists(self.json_path):
            with open(self.json_path) as json_file:
                self.data = json.load(json_file)
        self.__git = None
Esempio n. 53
    def main(username, password):
        # Ignore error, logging set up in logging utils
        from . import logging_utils
        from .navigation import Leifur
        from .config import get_config, set_config, get_config_from_user
        from .connectivity import check_internet_connection
        from .update_checker import check_for_updates

        lock = LockFile('/tmp/spoppy')

        try:
            try:
                # Try for 1s to acquire the lock
                lock.acquire(1)
            except LockTimeout:
                click.echo('Could not acquire lock, is spoppy running?')
                click.echo(
                    'If you\'re sure that spoppy is not running, '
                    'try removing the lock file %s' % lock.lock_file
                )
                click.echo(
                    'You can try removing the lock file by responding [rm]. '
                    'spoppy will exit on all other inputs'
                )
                try:
                    response = raw_input('')
                except NameError:
                    response = input('')
                if response == 'rm':
                    lock.break_lock()
                else:
                    raise TypeError('Could not get lock')
        except TypeError:
            pass
        else:
            check_internet_connection()
            # Check for updates
            check_for_updates(click, get_version(), lock)

            if username and password:
                set_config(username, password)
            else:
                username, password = get_config()
            if not (username and password):
                username, password = get_config_from_user()

            navigator = None
            try:
                navigator = Leifur(username, password)
                navigator.start()
            finally:
                if navigator:
                    navigator.shutdown()
                logger.debug('Finally, bye!')
        finally:
            if lock.i_am_locking():
                lock.release()
Esempio n. 54
    def lock(self):
        """
        Method used for acquiring a lock using the lockfile module.
        """
        lock = LockFile(self.lockfile)
        # check if it's locked
        if lock.is_locked():
            # Note that lock.i_am_locking() could be True, so
            # this approach is not really efficient from a threading
            # point of view. However, we must be consistent with
            # MemcachedCacheQueue's behavior.
            return False
        # else we can attempt to acquire a lock
        # we don't want this to fail silently
        # so we set timeout=0
        lock.acquire(timeout=0)
        return True
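
    # A hypothetical counterpart (not in the original snippet): lockfile
    # records lock ownership on disk, so a fresh LockFile on the same path
    # in the same process and thread can release the lock taken in lock().
    def unlock(self):
        lock = LockFile(self.lockfile)
        if lock.i_am_locking():
            lock.release()
            return True
        return False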
Esempio n. 55
def basic_html(request):
    cache_key = "asv-test_web-basic_html"
    if LockFile is DummyLock:
        cache_key += os.environ.get('PYTEST_XDIST_WORKER', '')
    cache_dir = request.config.cache.makedir(cache_key)

    tmpdir = join(six.text_type(cache_dir), 'cached')
    lockfile = join(six.text_type(cache_dir), 'lock')

    lock = LockFile(lockfile)
    try:
        lock.acquire(timeout=900)
        html_dir, dvcs = _rebuild_basic_html(tmpdir)
    finally:
        lock.release()

    return html_dir, dvcs
Esempio n. 56
    def _lock_state_file(self):
        self._lock = LockFile(self.path)

        if (self._lock.is_locked() and
                (time() - getmtime(self._lock.lock_file)) > 10):
            self._lock.break_lock()

        self._lock.acquire()
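
    # A hypothetical counterpart to the method above (not in the original
    # snippet), releasing the state-file lock once the holder is done.
    def _unlock_state_file(self):
        if self._lock.i_am_locking():
            self._lock.release()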
Esempio n. 57
def get_auth_token(use_client_file=True, **kwargs):

    current_node = utils.get_node_or_source_node()
    if use_client_file:
        current_instance = utils.get_instance_or_source_instance()
        if constants.AUTH_TOKEN_VALUE in current_instance.runtime_properties:
            token = current_instance.runtime_properties[constants.AUTH_TOKEN_VALUE]
            if constants.AUTH_TOKEN_EXPIRY in current_instance.runtime_properties:
                token_expires = current_instance.runtime_properties[constants.AUTH_TOKEN_EXPIRY]
            else:
                token_expires = 0
        else:
            token = None
            token_expires = 0

        if os.path.isfile(constants.default_path_to_local_azure_token_file):
            token, token_expires = _generate_token_if_expired(constants.default_path_to_local_azure_token_file,
                                                              token, token_expires)
            return token

    config_path = current_node.properties.get(constants.path_to_azure_conf_key) or constants.default_path_to_azure_conf
    lock = LockFile(config_path)
    try:
        lock.acquire()
        token, token_expires = _get_token_from_file(config_path)
        token, token_expires = _generate_token_if_expired(config_path, token, token_expires)
    except Exception:
        err_message = "Failures while locking or using {}".format(config_path)
        ctx.logger.debug(err_message)
        raise NonRecoverableError(err_message)
    finally:
        # Release the lock whether the lookup succeeded or failed
        if lock.i_am_locking():
            lock.release()
    return token
Esempio n. 58
def write_target_runtime_properties_to_file(required_keys, prefixed_keys=None, need_suffix=None):
    try:
        current_runtime_folder = constants.default_path_to_runtime_folder
        current_instance_key = "{0}{1}".format(ctx.source.node.id, ctx.source.instance.id)
        current_runtime_file_path = "{0}{1}".format(current_runtime_folder, current_instance_key)
        ctx.logger.info("current_runtime_file_path is {0}".format(current_runtime_file_path))
        lock = LockFile(current_runtime_file_path)
        lock.acquire()
        ctx.logger.info("{} is locked".format(lock.path))
        with open(current_runtime_file_path, 'a') as f:
            for curr_runtime_property in ctx.target.instance.runtime_properties:
                orig_runtime_property = curr_runtime_property
                if required_keys and curr_runtime_property in required_keys:
                    if need_suffix and (curr_runtime_property in need_suffix):
                        curr_runtime_property = "{0}{1}{2}".format(curr_runtime_property, ctx.source.node.id, ctx.source.instance.id)
                        ctx.logger.info("curr_runtime_property is {0}".format(curr_runtime_property))
                    current_line = "{0}={1}\n".format(curr_runtime_property, ctx.target.instance.runtime_properties[orig_runtime_property])
                    f.write(current_line)
                else:
                    if prefixed_keys is not None:
                        for curr_prefixed_key in prefixed_keys:
                            if curr_runtime_property.startswith(curr_prefixed_key):
                                current_line = "{0}={1}\n".format(curr_runtime_property, ctx.target.instance.runtime_properties[curr_runtime_property])
                                f.write(current_line)
    except Exception:
        ctx.logger.info("Failures while locking or using {}".format(current_runtime_file_path))
        lock.release()
        raise NonRecoverableError("Failures while locking or using {}".format(current_runtime_file_path))

    lock.release()
    ctx.logger.info("{} is released".format(current_runtime_file_path))
Esempio n. 59
def concat_log_tmp_to_perm(log_file_tmp, log_file_perm, log_lock_path):
    """
    Combines logs on the temporary file system with the logs on the SD card.
    
    :param log_file_tmp: Path to the Log File on the tmpfs
    :type log_file_tmp: str
    :param log_file_perm: Path to the Log File on the SD Card
    :type log_file_perm: str
    :param log_lock_path: Path to the lock file
    :type log_lock_path: str
    """
    # Daemon Logs
    if not filecmp.cmp(log_file_tmp, log_file_perm):
        logging.debug("[Log Backup] Concatenating log cache"
                      " ({}) to permanent storage ({})".format(log_file_tmp, log_file_perm))
        lock = LockFile(log_lock_path)

        while not lock.i_am_locking():
            try:
                logging.debug("[Log Backup] Acquiring Lock: {}".format(lock.path))
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:  # TODO Needs better catch statement
                logging.warning("[Log Backup] Breaking Lock to Acquire: {}".format(lock.path))
                lock.break_lock()
                lock.acquire()
            finally:
                logging.debug("[Log Backup] Gained lock: {}".format(lock.path))
        try:
            with open(log_file_perm, 'a') as fout, open(log_file_tmp, 'r+') as tmp_log:
                for line in tmp_log:
                    fout.write(line)
                # Clear tmp_log now that its lines have been copied over;
                # truncating at EOF is a no-op, so rewind first.
                tmp_log.seek(0)
                tmp_log.truncate()
            logging.debug("[Log Backup] Appended log data to {}".format(log_file_perm))
        except Exception:  # TODO Needs better catch statement
            logging.warning("[Log Backup] Unable to append data to {}".format(log_file_perm))

        logging.debug("[Log Backup] Removing lock: {}".format(lock.path))
        lock.release()
    else:
        logging.debug(
                "[Log Backup] Logs the same, skipping. ({}) ({})".format(log_file_tmp,
                                                                         log_file_perm))
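
# An illustrative invocation (hypothetical paths, not from the original
# snippet): mirror the tmpfs daemon log onto the SD card under the shared
# lock file.
concat_log_tmp_to_perm('/var/tmp/daemon.log',
                       '/home/pi/logs/daemon.log',
                       '/var/lock/daemon_log.lock')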