def generateQueryAndQueryVectorMap(line_tmp):
    sentencevector = []
    # print "Number of Records Left:\t" + str(corpuscount - tmpcount)
    query = line_tmp.lower()
    component_word = query.split(' ')
    for one_word in component_word:
        if redis_handle.exists(one_word):
            vector_tmp = redis_handle.get(one_word)
            vector_final = normalize_redis_vector(vector_tmp)
            sentencevector.append(vector_final)
            #indexnum = vocab_dict.get(one_word)
            #sentencevector.append((repvector[indexnum]).tolist())
        else:
            sentencevector.append([float(0)] * vector_size)
    l = numpy.array(sentencevector)
    # Centroid calculation - each sentence.
    # Sums up all vectors (columns) and generates a final list (1D) of size vector_size
    lmt = numpy.array(l.sum(axis=0, dtype=numpy.float32)).tolist()

    # lmt is a list, so comparing it to 0.0 would always be True;
    # check instead that the summed vector has any non-zero component.
    if any(lmt):
        # Average the vectors based on the number of words in the sentence.
        query_vector = [x / len(component_word) for x in lmt]
    else:
        query_vector = [float(0)] * vector_size

    filename = getRandomOutputFilename()
    lock = LockFile(filename)
    lock.acquire()
    # Write the query and its vector to the output file (the lock guards this same path)
    fh = open(filename, 'w')
    fh.write(str(query)+"\t")
    for item in query_vector:
        fh.write("%s " % str(item))
    fh.close()
    lock.release()
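The snippet above acquires the lock, writes, and releases it by hand. Since locks from the lockfile package also support the context-manager protocol, the same write can be sketched more defensively; the helper name below is illustrative, not part of the original source:

from lockfile import LockFile

def write_query_vector(filename, query, query_vector):
    # The with-block releases the lock even if the write raises,
    # unlike the manual acquire()/release() sequence above.
    with LockFile(filename):
        with open(filename, 'w') as fh:
            fh.write(str(query) + "\t")
            fh.write(" ".join(str(item) for item in query_vector))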
Example #2
    def check_sync(self):
        if not os.path.exists(COLLECTD_FILE):
            return

        lock = LockFile(COLLECTD_FILE)

        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=5)
            except LockTimeout:
                return

        with open(COLLECTD_FILE, "rb") as f:
            try:
                data = pickle.loads(f.read())
            except Exception:
                data = {}

        lock.release()

        alerts = []
        for k, v in list(data.items()):
            if k == "ctl-ha/disk_octets":
                title = "CTL HA link is actively used, check initiators connectivity"
            else:
                title = k

            if v["Severity"] == "WARNING":
                level = AlertLevel.WARNING
            else:
                level = AlertLevel.CRITICAL

            alerts.append(Alert(title, level=level))

        return alerts
Example #3
def ReadTestCase( resourcefile ) :
	list = []
	row_index = 0
	from lockfile import LockFile
	lock = LockFile(resourcefile)
	lockid = lock.is_locked()
	print lockid
	for a in xrange(1, 2):
		if lockid == False:
			lock.acquire()
			print "I have locked Resource File"
			break
		else:
			time.sleep (10)
		lockid = lock.is_locked()
	rb = open_workbook(resourcefile)
	r_sheet = rb.sheet_by_index(0)
	wb = copy(rb)
	w_sheet = wb.get_sheet(0)
	keys = [r_sheet.cell(0, col_index).value for col_index in xrange(r_sheet.ncols)]
	j = r_sheet.nrows
	q = r_sheet.ncols
	print col_index
	while row_index < j: 
		d = {keys[col_index]: r_sheet.cell(row_index, col_index).value for col_index in xrange(r_sheet.ncols)}
		temp = ""
		if ( d['Execution'] == "yes") :
			temp = d['TC Name']
			print temp
			list.append(temp)
			wb.save(resourcefile)
		row_index = row_index + 1
	lock.release()
	return list
Example #4
def GetPassword( resourcefile , list ) :
	count = 0
	pwd = []
	rb = open_workbook(resourcefile)
	r_sheet = rb.sheet_by_index(0)
	from lockfile import LockFile
	lock = LockFile(resourcefile)
	lockid = lock.is_locked()
	print lockid
	for a in xrange(1, 10):
		if lockid == False:
			lock.acquire()
			print "I have locked Resource File"
			break
		else:
			time.sleep (10)
		lockid = lock.is_locked()
	wb = copy(rb)
	w_sheet = wb.get_sheet(0)
	keys = [r_sheet.cell(0, col_index).value for col_index in xrange(r_sheet.ncols)]
	for row_index in xrange(1, r_sheet.nrows):
		d = {keys[col_index]: r_sheet.cell(row_index, col_index).value 
			for col_index in xrange(r_sheet.ncols)} 
		if d['IP'] in list:
			count = count + 1
			pwd = d['Password']
			wb.save(resourcefile)
		if count == len(list):
			break
	# Release once, after the scan; releasing inside the loop fails on a second
	# match and never runs at all when no IP matches.
	lock.release()
	return pwd
Example #5
    def read(self):
        """
        Takes a reading from the MH-Z19 and updates the self._co2 value

        :returns: None on success or 1 on error
        """
        if not self.serial_device:  # Don't measure if device isn't validated
            return None

        lock = LockFile(self.k30_lock_file)
        try:
            # Acquire lock on MHZ19 to ensure more than one read isn't
            # being attempted at once.
            while not lock.i_am_locking():
                try:  # wait 60 seconds before breaking lock
                    lock.acquire(timeout=60)
                except Exception as e:
                    self.logger.error(
                        "{cls} 60 second timeout, {lock} lock broken: "
                        "{err}".format(cls=type(self).__name__,
                                       lock=self.k30_lock_file,
                                       err=e))
                    lock.break_lock()
                    lock.acquire()
            self._co2 = self.get_measurement()
            lock.release()
            if self._co2 is None:
                return 1
            return  # success - no errors
        except Exception as e:
            self.logger.error(
                "{cls} raised an exception when taking a reading: "
                "{err}".format(cls=type(self).__name__, err=e))
            lock.release()
            return 1
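Many of the examples in this listing repeat the same idiom: try to acquire with a timeout, and on timeout treat the lock as stale, break it, and take it. A minimal sketch of that idiom factored into a helper (the function name is hypothetical):

from lockfile import LockFile, LockTimeout

def acquire_or_break(path, timeout=60):
    """Acquire the lock at `path`, breaking it if it is not released within `timeout` seconds."""
    lock = LockFile(path)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=timeout)
        except LockTimeout:
            # Assume the previous holder died and left a stale lock behind.
            lock.break_lock()
            lock.acquire()
    return lock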
def get_auth_token(use_client_file=True, **kwargs):

    current_node = utils.get_node_or_source_node()
    if use_client_file:
        current_instance = utils.get_instance_or_source_instance()
        if constants.AUTH_TOKEN_VALUE in current_instance.runtime_properties:
            token = current_instance.runtime_properties[constants.AUTH_TOKEN_VALUE]
            if constants.AUTH_TOKEN_EXPIRY in current_instance.runtime_properties:
                token_expires = current_instance.runtime_properties[constants.AUTH_TOKEN_EXPIRY]
            else:
                token_expires = 0
        else:
            token = None
            token_expires = 0

        if os.path.isfile(constants.default_path_to_local_azure_token_file):
            token, token_expires = _generate_token_if_expired(constants.default_path_to_local_azure_token_file,
                                                              token, token_expires)
            return token

    try:
        config_path = current_node.properties.get(constants.path_to_azure_conf_key) or constants.default_path_to_azure_conf
        lock = LockFile(config_path)
        lock.acquire()
        token, token_expires = _get_token_from_file(config_path)
    except:
        err_message = "Failures while locking or using {}".format(config_path)
        ctx.logger.debug(err_message)
        lock.release()
        raise NonRecoverableError(err_message)

    token, token_expires = _generate_token_if_expired(config_path, token, token_expires)
    lock.release()
    return token
Example #7
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum, sensor_ht_dewpt_c, sensor):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)    # wait up to 60 seconds
        except:
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()

    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)

    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4:d}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))
            logging.debug("[Write Sensor Log] Data appended to %s", sensor_ht_log_file_tmp)
    except:
        logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_ht_log_file_tmp)

    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
Example #8
def IoOperat_multi(tmpfile, mode, statisticArr, chunker):
    # tmpfile = "value_" + md5 + ".dat"
    with open(tmpfile, "rb") as f:
        fields = pickle.load(f)
        samples = pickle.load(f)
        headers = pickle.load(f)
        filepath_json = pickle.load(f)
    recordstring, infonum, infoSpecial = chunker2string(
        chunker, fields, samples, mode)
    if sys.platform.startswith('linux'):
        with open(filepath_json, "a") as fp:
            fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
            statisticArr[0] += chunker[1]
            statisticArr[1] += infonum
            statisticArr[2] += infoSpecial
            fp.write(recordstring)
    else:
        lock = LockFile(filepath_json)
        lock.acquire()
        with open(filepath_json, "a") as fp:
            statisticArr[0] += chunker[1]
            statisticArr[1] += infonum
            statisticArr[2] += infoSpecial
            fp.write(recordstring)
        lock.release()
    return
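IoOperat_multi picks its locking primitive by platform: fcntl.flock on Linux, LockFile elsewhere. A sketch of that choice isolated into a generic append-under-lock helper (the function name is illustrative):

import sys

def append_locked(path, text):
    # Same platform split as above: advisory flock on Linux, lockfile elsewhere.
    if sys.platform.startswith('linux'):
        import fcntl
        with open(path, "a") as fp:
            fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
            fp.write(text)
    else:
        from lockfile import LockFile
        lock = LockFile(path)
        lock.acquire()
        try:
            with open(path, "a") as fp:
                fp.write(text)
        finally:
            lock.release()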
    def addScanResult( self,\
                       scanResult,\
                       ADD_MODE = NEW_SCAN_RESULT ):

        lock = LockFile(self.m_TokenFileName)

        # Handle the case where we cannot wait for the file to be unlocked
        lock.acquire(SECONDS_WAIT_FOR_UNLOCK)

        f = open(self.m_TokenFileName,\
                 'r+')
        listScanResult = self.loadScanResults(f)

        idToken = 0

        if (ADD_MODE == TO_EXIST_SCAN_RESULT):
            listScanResult.setScanResultByIdToken(scanResult)
        else:
            idToken = listScanResult.addScanResult(scanResult)

        f.seek(0)
        f.write(listScanResult.toJSON())
        f.close()
        lock.release()

        return idToken
Example #10
    def run(self):
        self.set_arguments()
        args = self.parser.parse_args()
        if args.verbose:
            set_logging(level=logging.DEBUG)
        elif args.quiet:
            set_logging(level=logging.WARNING)
        else:
            set_logging(level=logging.INFO)

        lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
        try:
            lock.acquire(timeout=-1)
            args.func(args)
        except AttributeError:
            if hasattr(args, 'func'):
                raise
            else:
                self.parser.print_help()
        except KeyboardInterrupt:
            pass
        except AlreadyLocked:
            logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.")
        except Exception as ex:
            if args.verbose:
                raise
            else:
                logger.error("Exception caught: %s", repr(ex))
                logger.error(
                    "Run the command again with -v option to get more information.")
        finally:
            if lock.i_am_locking():
                lock.release()
Example #12
    def write_dsn(self, wdmpath, dsn, data):
        """Write to self.wdmfp/dsn the time-series data."""
        dsn_desc = self.describe_dsn(wdmpath, dsn)
        tcode = dsn_desc['tcode']
        tstep = dsn_desc['tstep']
        tsfill = dsn_desc['tsfill']

        data.fillna(tsfill, inplace=True)
        start_date = data.index[0]

        dstart_date = start_date.timetuple()[:6]
        llsdat = self._tcode_date(tcode, dstart_date)
        if dsn_desc['base_year'] > llsdat[0]:
            raise ValueError("""
*
*   The base year for this DSN is {0}.  All data to insert must be after the
*   base year.  Instead the first year of the series is {1}.
*
""".format(dsn_desc['base_year'], llsdat[0]))

        nval = len(data)
        lock = LockFile(wdmpath)
        lock.acquire()
        wdmfp = self._open(wdmpath, 58)
        retcode = self.wdtput(wdmfp, dsn, tstep, llsdat, nval, 1, 0, tcode,
                              data)
        lock.release()
        self._close(wdmpath)
        self._retcode_check(retcode, additional_info='wdtput')
def write_relay_log(relayNumber, relaySeconds):
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)    # wait up to 60 seconds
            except:
                logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Relay Log] Gained lock: %s", lock.path)
        relay = [0] * 9
        for n in range(1, 9):
            if n == relayNumber:
                relay[relayNumber] = relaySeconds
        try:
            with open(relay_log_file_tmp, "ab") as relaylog:
                relaylog.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"), 
                    relay[1], relay[2], relay[3], relay[4],
                    relay[5], relay[6], relay[7], relay[8]))
        except:
            logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)
        logging.info("[Write Relay Log] Removing lock: %s", lock.path)
        lock.release()
Example #14
def main():

    lock = LockFile(SMART_FILE)
    while not lock.i_am_locking():
        try:
            lock.acquire(timeout=5)
        except LockTimeout:
            lock.break_lock()

    data = {}
    if os.path.exists(SMART_FILE):
        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                pass

    device = os.environ.get('SMARTD_DEVICE')
    if device not in data:
        data[device] = []

    message = os.environ.get('SMARTD_MESSAGE')
    if message not in data[device]:
        data[device].append(message)

    with open(SMART_FILE, 'wb') as f:
        f.write(pickle.dumps(data))

    lock.release()
    def log_output(self, status_str, target, meas, x_opt, Xtrue, r, m, t1, t2,
                   t3, mem_used, A, y):
        # write output to a file
        # we should use a different file for each trial and then just do
        # a "reduce".  This way any individual task isn't too long
        try:
            lock = LockFile(self.filename)
        except Exception:
            # Raising a bare string is invalid; raise a real exception instead.
            raise IOError("Could not open file")

        tol = 1e-2
        err0 = 0
        err1 = np.linalg.norm(Xtrue - x_opt, 'fro') / np.linalg.norm(Xtrue)
        # if (m > 25):
        #                    temp = Xtrue - x_opt
        #                    scipy.io.savemat('out.mat', mdict={'true': Xtrue, 'opt' : x_opt, 'A':A, 'y':y})
        #                    print temp
        #                    exit(0)
        if err1 > tol:
            err0 = 1
        lock.acquire()
        with open(self.filename, 'a') as my_file:
            try:
                date_str = datetime.datetime.now().strftime("%b-%d-%I:%M%p")
                my_file.write(
                    self.formatstring %
                    (date_str, status_str, common.TARGET_NAMES[target],
                     common.ENSEMBLE_NAMES[meas], self.n, r, m, err1, err0, t1,
                     t2, t3, mem_used))
            except:
                print "Could not open file!"
        lock.release()
Example #16
    def main(username, password):
        # Ignore error, logging set up in logging utils
        from . import logging_utils
        from .navigation import Leifur
        from .config import get_config, set_config, get_config_from_user

        lock = LockFile('/tmp/spoppy')
        try:
            # Try for 5s to acquire the lock
            lock.acquire(5)
        except LockTimeout:
            click.echo('Could not acquire lock, is spoppy running?')
            click.echo(
                'If you\'re sure that spoppy is not running, '
                'try removing the lock file %s' % lock.lock_file
            )
        else:

            if username and password:
                set_config(username, password)
            else:
                username, password = get_config()
            if not (username and password):
                username, password = get_config_from_user()
            navigator = None
            try:
                navigator = Leifur(username, password)
                navigator.start()
            finally:
                if navigator:
                    navigator.shutdown()
                logger.debug('Finally, bye!')
        finally:
            if lock.i_am_locking():
                lock.release()
Example #17
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum, sensor_ht_dewpt_c, sensor):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)    # wait up to 60 seconds
        except:
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()

    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)

    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4}\n'.format(
                datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))
            logging.debug("[Write Sensor Log] Data appended to %s", sensor_ht_log_file_tmp)
    except:
        logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_ht_log_file_tmp)

    logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
    lock.release()
Example #18
def save_Y(pos, move, text_count):
    log_move = "Y:{}:{}".format(pos.get_board_piece(move[0]), render(119-move[1]))
    lock = LockFile("log_Y.txt")
    lock.acquire()
    with open("log_Y.txt", "a") as log_Y:
        log_Y.write(str(text_count) + " " + log_move + "\n")
    lock.release()
Example #19
def get( arquivo ):
    lock = LockFile(arquivo)  # create the lock with LockFile
    lock.acquire()  # lock the file

    if isEmpty(arquivo):  # check whether the input file is empty
        lock.release()  # release the lock before returning early
        return
    arq = open(arquivo, 'r')    # open the file in read mode
    firstLine = arq.readline()  # read the first line (it will be dropped from the file)
    linhas = arq.read()         # read the rest of the file
    arq.close()                 # close the file

    # Rewrite the file without the first line
    arq = open(arquivo, 'w')
    arq.writelines(linhas)
    arq.close()

    lock.release()  # unlock the file

    runn = 'data/running.txt'
    lock = LockFile(runn)  # lock the running file with LockFile
    lock.acquire()  # lock the file

    arq = open(runn, 'a')
    arq.write(firstLine)
    arq.close()

    lock.release()  # unlock the file

    return firstLine
Example #20
    def release(self):
        """
        Method used to release a lock using the lockfile module.
        """
        lock = LockFile(self.lockfile)
        if lock.i_am_locking():
            lock.release()
Example #21
    def query(self, query_str):
        """ Send command and return reply """
        lock_file_amend = '{lf}.{dev}'.format(lf=ATLAS_PH_LOCK_FILE,
                                              dev=self.serial_device.replace(
                                                  "/", "-"))
        lock = LockFile(lock_file_amend)
        try:
            while not lock.i_am_locking():
                try:
                    lock.acquire(
                        timeout=10
                    )  # wait up to 10 seconds before breaking lock
                except Exception as e:
                    logger.exception(
                        "{cls} 10 second timeout, {lock} lock broken: "
                        "{err}".format(cls=type(self).__name__,
                                       lock=lock_file_amend,
                                       err=e))
                    lock.break_lock()
                    lock.acquire()
            self.send_cmd(query_str)
            time.sleep(1.3)
            response = self.read_lines()
            lock.release()
            return response
        except Exception as err:
            logger.exception(
                "{cls} raised an exception when taking a reading: "
                "{err}".format(cls=type(self).__name__, err=err))
            lock.release()
            return None
Example #22
    def run(self):
        alerts = []

        if not os.path.exists(COLLECTD_FILE):
            return alerts

        lock = LockFile(COLLECTD_FILE)

        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=5)
            except LockTimeout:
                return alerts

        with open(COLLECTD_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                data = {}

        lock.release()

        for k, v in list(data.items()):
            if v['Severity'] == 'WARNING':
                l = Alert.WARN
            else:
                l = Alert.CRIT
            if k == 'ctl-ha/disk_octets':
                msg = "CTL HA link is actively used, check initiators connectivity"
            else:
                msg = k
            alerts.append(Alert(l, msg))

        return alerts
Example #23
def store(email,nickname,number,rate,strs,regressions):
    # User-entered data hits the filesystem here.  
    if not validate_email(email):
        return

    newcontrib = [ bleach.clean(email), 
                   bleach.clean(nickname), 
                   bleach.clean(number), 
                   bleach.clean(rate),
                   bleach.clean(strs),
                   bleach.clean(regressions)]

    lock = LockFile("/var/local/bz-triage/contributors.cfg")
    lock.acquire()
    
    try:  
        contributors = json.load(open("/var/local/bz-triage/contributors.cfg"))
    except:
        logging.info("Failed to open the file...")
        contributors = list()

    for existing in contributors:
        if existing[0] == newcontrib[0]:
            contributors.remove(existing)
    contributors.append( newcontrib )

    with open("/var/local/bz-triage/contributors.cfg", 'w') as outfile:
        json.dump(contributors, outfile)
    lock.release()
Example #24
def write_relay_log(relayNumber, relaySeconds, sensor, gpio):
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)

    lock = LockFile(relay_log_lock_path)

    while not lock.i_am_locking():
        try:
            logging.debug("[Write Relay Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)    # wait up to 60 seconds
        except:
            logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()

    logging.debug("[Write Relay Log] Gained lock: %s", lock.path)

    try:
        with open(relay_log_file_tmp, "ab") as relaylog:
            relaylog.write('{0} {1:d} {2:d} {3:d} {4:.2f}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor, relayNumber, gpio, relaySeconds))

    except:
        logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)

    logging.debug("[Write Relay Log] Removing lock: %s", lock.path)
    lock.release()
def write_target_runtime_properties_to_file(required_keys, prefixed_keys=None, need_suffix=None):
    try:
        current_runtime_folder = constants.default_path_to_runtime_folder
        current_instance_key = "{0}{1}".format(ctx.source.node.id, ctx.source.instance.id)
        current_runtime_file_path = "{0}{1}".format(current_runtime_folder, current_instance_key)
        ctx.logger.info("current_runtime_file_path is {0}".format(current_runtime_file_path))
        lock = LockFile(current_runtime_file_path)
        lock.acquire()
        ctx.logger.info("{} is locked".format(lock.path))
        with open(current_runtime_file_path, 'a') as f:
            for curr_runtime_property in ctx.target.instance.runtime_properties:
                orig_runtime_property = curr_runtime_property
                if required_keys and curr_runtime_property in required_keys:
                    if need_suffix and (curr_runtime_property in need_suffix):
                        curr_runtime_property = "{0}{1}{2}".format(curr_runtime_property, ctx.source.node.id, ctx.source.instance.id)
                        ctx.logger.info("curr_runtime_property is {0}".format(curr_runtime_property))
                    current_line = "{0}={1}\n".format(curr_runtime_property, ctx.target.instance.runtime_properties[orig_runtime_property])
                    f.write(current_line)
                else:
                    if prefixed_keys is not None:
                        for curr_prefixed_key in prefixed_keys:
                            if curr_runtime_property.startswith(curr_prefixed_key):
                                current_line = "{0}={1}\n".format(curr_runtime_property, ctx.target.instance.runtime_properties[curr_runtime_property])
                                f.write(current_line)
        f.close()
    except:
        ctx.logger.info("Failures while locking or using {}".format(current_runtime_file_path))
        lock.release()
        raise NonRecoverableError("Failures while locking or using {}".format(current_runtime_file_path))

    lock.release()
    ctx.logger.info("{} is released".format(current_runtime_file_path))
Example #26
    def run(self):
        alerts = []

        if not os.path.exists(SMART_FILE):
            return alerts

        lock = LockFile(SMART_FILE)

        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=5)
            except LockTimeout:
                return alerts

        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                data = {}

        msg = ''
        for msgs in data.itervalues():
            if not msgs:
                continue
            msg += '<br />\n'.join(msgs)

        if msg:
            alerts.append(Alert(Alert.CRIT, msg))

        lock.release()

        return alerts
def write_sensor_log():
    config = ConfigParser.RawConfigParser()

    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)    # wait up to 60 seconds
            except:
                logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Sensor Log] Gained lock: %s", lock.path)
        try:
            with open(sensor_log_file_tmp, "ab") as sensorlog:
                sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"), 
                    tempc, humidity, dewpointc))
                logging.info("[Write Sensor Log] Data appended to %s", sensor_log_file_tmp)
        except:
            logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_log_file_tmp)
        logging.info("[Write Sensor Log] Removing lock: %s", lock.path)
        lock.release()
        def addScanResult( self,\
                           scanResult,\
                           ADD_MODE = NEW_SCAN_RESULT ):
                
                lock = LockFile(self.m_TokenFileName)

                # Handle the case where we cannot wait for the file to be unlocked
                lock.acquire( SECONDS_WAIT_FOR_UNLOCK )

                f = open(self.m_TokenFileName,\
                         'r+')
                listScanResult = self.loadScanResults( f )

                idToken = 0
                
                if ( ADD_MODE == TO_EXIST_SCAN_RESULT ):
                        listScanResult.setScanResultByIdToken( scanResult )
                else:
                        idToken = listScanResult.addScanResult( scanResult )

                f.seek(0)
                f.write( listScanResult.toJSON() )
                f.close()
                lock.release()

                return idToken
Example #29
    def run(self):
        alerts = []

        if not os.path.exists(SMART_FILE):
            return alerts

        lock = LockFile(SMART_FILE)

        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=5)
            except LockTimeout:
                return alerts

        with open(SMART_FILE, 'rb') as f:
            try:
                data = pickle.loads(f.read())
            except:
                data = {}

        for msgs in data.itervalues():
            if not msgs:
                continue
            for msg in msgs:
                if msg is None:
                    continue
                alerts.append(Alert(Alert.CRIT, msg, hardware=True))

        lock.release()

        return alerts
Example #30
    def write(self, path):
        """Writes the PlPlugMapM contents to a file."""

        try:
            from lockfile import LockFile
            useLockFile = True
        except:
            useLockFile = False

        if useLockFile:
            # Get locked file
            lock = LockFile(path)
            if not lock.is_locked():
                # Write plugmap file
                lock.acquire()
                ff = open(path, 'w')
                ff.write(self.data._contents)
                ff.close()
                lock.release()
            else:
                raise PlPlugMapMFileException(
                    'path {0} is locked'.format(path))
        else:
            ff = open(path, 'w')
            ff.write(self.data._contents)
            ff.close()
Example #32
    def read(self):
        """
        Takes a reading from the K30 and updates the self._co2 value

        :returns: None on success or 1 on error
        """
        lock = LockFile(K30_LOCK_FILE)
        try:
            # Acquire lock on K30 to ensure more than one read isn't
            # being attempted at once.
            while not lock.i_am_locking():
                try:
                    lock.acquire(timeout=60)  # wait up to 60 seconds before breaking lock
                except Exception as e:
                    logger.error("{cls} 60 second timeout, {lock} lock broken: "
                                 "{err}".format(cls=type(self).__name__,
                                                lock=K30_LOCK_FILE,
                                                err=e))
                    lock.break_lock()
                    lock.acquire()
            self._co2 = self.get_measurement()
            lock.release()
            if self._co2 is None:
                return 1
            return  # success - no errors
        except Exception as e:
            logger.error("{cls} raised an exception when taking a reading: "
                         "{err}".format(cls=type(self).__name__, err=e))
            lock.release()
            return 1
Example #33
    def run_function_with_lock(self, function, lock_file, timeout=30, args=[], kwargs={}):
        self.logger.debug('starting function with lock: %s' % lock_file)
        lock = LockFile(lock_file)
        try:
            while not lock.i_am_locking():
                try:
                    lock.acquire(timeout=timeout)
                except (LockTimeout, NotMyLock) as e:
                    self.logger.debug('breaking lock')
                    lock.break_lock()
                    lock.acquire()
                    self.logger.exception(e)

            self.logger.debug('lock acquired: starting function')
            return function(*args, **kwargs)
        finally:
            self.logger.debug('function done, releasing lock')

            if lock.is_locked():
                try:
                    lock.release()
                except NotMyLock:
                    try:
                        os.remove(lock_file)
                    except Exception as e:
                        self.logger.exception(e)
            self.logger.debug('lock released')
Example #35
    def __setitem__(self, key, item):

        saved = False

        try:
            saved = self._save(os.path.join(self._path, key), item)
        except:
            saved = False

        if not saved:
            return

        lock = LockFile(self._lock_filename)
        lock.acquire()

        try:
            with open(self._index_filename, "r") as f:
                index = json.load(f)
        except:
            index = dict()

        index[key] = type(item).__name__

        with open(self._index_filename, "w") as f:
            json.dump(index, f, indent=4)

        lock.release()
Example #36
	def on_post(self, req, resp, id):
		try:
			user = req.context['user']

			# Check that the task exists
			task = session.query(model.Task).get(id)
			if task is None:
				req.context['result'] = 'Neexistujici uloha'
				resp.status = falcon.HTTP_404
				return

			# Check that git_branch and git_path are set
			if (task.git_path is None) or (task.git_branch is None):
				req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
				resp.status = falcon.HTTP_400
				return

			if task.git_branch == "master":
				req.context['result'] = 'Uloha je jiz ve vetvi master'
				resp.status = falcon.HTTP_400
				return

			wave = session.query(model.Wave).get(task.wave)

			# Only administrators and the wave guarantor may perform the merge
			if (not user.is_logged_in()) or ((not user.is_admin()) and (user.id != wave.garant)):
				req.context['result'] = 'Nedostatecna opravneni'
				resp.status = falcon.HTTP_400
				return

			# Check the git lock
			lock = util.lock.git_locked()
			if lock:
				req.context['result'] = 'GIT uzamcen zámkem '+lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
				resp.status = falcon.HTTP_409
				return

			try:
				mergeLock = LockFile(util.admin.taskMerge.LOCKFILE)
				mergeLock.acquire(60) # lock timeout is 1 minute

				# Fetch the repository
				repo = git.Repo(util.git.GIT_SEMINAR_PATH)

				if task.git_branch in repo.heads:
					# Cannot delete branch we are on
					repo.git.checkout("master")
					repo.git.branch('-D', task.git_branch)

				task.git_branch = 'master'

				session.commit()
				resp.status = falcon.HTTP_200
			finally:
				mergeLock.release()
		except SQLAlchemyError:
			session.rollback()
			raise
		finally:
			session.close()
Example #37
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    if LockFile is DummyLock:
        cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')

    base_dir = config.cache.makedir(cache_key)

    lockfile = join(six.text_type(base_dir), 'lock')
    cache_dir = join(six.text_type(base_dir), 'cache')

    lock = LockFile(lockfile)
    lock.acquire(timeout=timeout)
    try:
        # Clear cache dir contents if it was generated with different
        # asv version
        tag_fn = join(six.text_type(base_dir), 'tag.json')
        tag_content = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            try:
                if util.load_json(tag_fn) != tag_content:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)

        yield cache_dir

        util.write_json(tag_fn, tag_content)
    finally:
        lock.release()
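locked_cache_dir yields the cache path, so in its original module it is presumably wrapped with contextlib.contextmanager (the decorator is not captured above). Under that assumption a caller would look roughly like this; build_environment is a placeholder and config comes from whatever pytest fixture supplies it:

with locked_cache_dir(config, 'example-env', timeout=900) as cache_dir:
    # The 'lock' file stays held for the whole block, so parallel
    # pytest-xdist workers do not rebuild the same cache concurrently.
    build_environment(cache_dir)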
Example #39
class MCP342x_read(object):
    def __init__(self, logger, address, channel, gain, resolution):
        self.logger = logger
        self.i2c_address = address
        self.channel = channel
        self.gain = gain
        self.resolution = resolution
        if GPIO.RPI_INFO['P1_REVISION'] in [2, 3]:
            self.I2C_bus_number = 1
        else:
            self.I2C_bus_number = 0
        self.bus = smbus.SMBus(self.I2C_bus_number)
        self.lock_file = "/var/lock/mycodo_adc_0x{:02X}.pid".format(
            self.i2c_address)

    def setup_lock(self):
        self.execution_timer = timeit.default_timer()
        try:
            self.lock = LockFile(self.lock_file)
            while not self.lock.i_am_locking():
                try:
                    self.logger.debug(
                        "[Analog->Digital Converter 0x{:02X}] Acquiring Lock: {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.acquire(timeout=60)  # wait up to 60 seconds
                except:
                    self.logger.warning(
                        "[Analog->Digital Converter 0x{:02X}] Waited 60 seconds. Breaking lock to acquire {}"
                        .format(self.i2c_address, self.lock.path))
                    self.lock.break_lock()
                    self.lock.acquire()
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Acquired Lock: {}".
                format(self.i2c_address, self.lock.path))
            self.logger.debug(
                "[Analog->Digital Converter 0x{:02X}] Executed in {}ms".format(
                    self.i2c_address,
                    (timeit.default_timer() - self.execution_timer) * 1000))
            return 1, "Success"
        except Exception as msg:
            return 0, "Analog->Digital Converter Fail: {}".format(msg)

    def release_lock(self):
        self.lock.release()

    def read(self):
        try:
            time.sleep(0.1)
            self.setup_lock()
            adc = MCP342x(self.bus,
                          self.i2c_address,
                          channel=self.channel - 1,
                          gain=self.gain,
                          resolution=self.resolution)
            response = adc.convert_and_read()
            self.release_lock()
            return 1, response
        except Exception as msg:
            self.release_lock()
            return 0, "Fail: {}".format(msg)
Example #40
def dumpTasks(filename, tasklist):
	lock = LockFile(LOCK_FILE)
	lock.acquire()
	with open(filename, 'w') as f:
		f.write("[\n  ")
		f.write(",\n  ".join(json.dumps(task) for task in tasklist))
		f.write("\n]\n")
	lock.release()
Example #41
    def on_post(self, req, resp, id):
        try:
            user = req.context['user']

            if (not user.is_logged_in()) or (not user.is_org()):
                resp.status = falcon.HTTP_400
                return

            # Check the git lock
            lock = util.lock.git_locked()
            if lock:
                req.context['result'] = ('GIT uzamcen zamkem ' + lock +
                                         '\nNekdo momentalne provadi akci s '
                                         'gitem, opakujte prosim akci za 20 '
                                         'sekund.')
                resp.status = falcon.HTTP_409
                return

            pullLock = LockFile(util.admin.waveDiff.LOCKFILE)
            pullLock.acquire(60)  # lock timeout is 1 minute

            # Fetch
            repo = git.Repo(util.git.GIT_SEMINAR_PATH)
            repo.remotes.origin.fetch()

            # Tasks in this wave
            tasks = session.query(model.Task).\
                filter(model.Task.wave == id).all()

            # Diff the task directories at task.git_commit against HEAD
            for task in tasks:
                if ((not task.git_branch) or (not task.git_path)
                        or (not task.git_commit)):
                    task.deploy_status = 'default'
                    continue

                # Checkout && pull the branch the task lives on
                repo.git.checkout(task.git_branch)
                repo.remotes.origin.pull()

                # Check that the task directory exists
                if os.path.isdir(util.git.GIT_SEMINAR_PATH + task.git_path):
                    hcommit = repo.head.commit
                    diff = hcommit.diff(task.git_commit, paths=[task.git_path])
                    if len(diff) > 0:
                        task.deploy_status = 'diff'
                else:
                    task.deploy_status = 'default'

            session.commit()
            req.context['result'] = {}
        except SQLAlchemyError:
            session.rollback()
            req.context['result'] = 'Nastala vyjimka backendu'
            raise
        finally:
            pullLock.release()
            session.close()
Example #42
def get_auth_token(use_client_file=True, **kwargs):
    ctx.logger.info("In auth.get_auth_token")
    if use_client_file:
        if constants.AUTH_TOKEN_VALUE in ctx.instance.runtime_properties:
            # If you are here , it means that this is during bootstrap
            ctx.logger.info("In auth.get_auth_token returning token from runtime props")
            ctx.logger.info("In auth.get_auth_token token from runtime props is:{}".format(ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]))
            return ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]

        # Check if token file exists on the client's VM. If so, take the value from it and set it in the runtime
        ctx.logger.info("In auth.get_auth_token checking local azure file path {}".format(constants.path_to_local_azure_token_file))
        if os.path.isfile(constants.path_to_local_azure_token_file):
            # If you are here , it means that this is during bootstrap
            ctx.logger.info("{} exists".format(constants.path_to_local_azure_token_file))
            token, token_expires = get_token_from_client_file()
            ctx.logger.info("get_auth_token expiry is {} ".format(token_expires))
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE] = token
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_EXPIRY] = token_expires
            ctx.logger.info("get_auth_token token1 is {} ".format(token))
            return token

    # From here, this is not during bootstrap, which also means that this code runs on the manager's VM.
    try:
        ctx.logger.info("In auth.get_auth_token b4 locking {}".format(constants.path_to_azure_conf))
        lock = LockFile(constants.path_to_azure_conf)
        lock.acquire()
        ctx.logger.info("{} is locked".format(lock.path))
        with open(constants.path_to_azure_conf, 'r') as f:
            json_data = json.load(f)
            token_expires = json_data["token_expires"]
            token = json_data["auth_token"]
            ctx.logger.info("get_auth_token token2 is {} ".format(token))
    except:
        raise NonRecoverableError("Failures while locking or using {}".format(constants.path_to_azure_conf))

    ctx.logger.info("In auth.get_auth_token b4 timestamp")
    timestamp = int(time.time())
    ctx.logger.info("In auth.get_auth_token timestamp is {}".format(timestamp))
    ctx.logger.info("In auth.get_auth_token token_expires1 is {}".format(token_expires))
    token_expires = int(token_expires)
    ctx.logger.info("In auth.get_auth_token token_expires2 is {}".format(token_expires))
    if token_expires-timestamp <= 600 or token_expires == 0 or token is None or token == "":
        ctx.logger.info("In auth.get_auth_token token_expires-timestamp {}".format(token_expires-timestamp))
        endpoints, payload = _get_payload_endpoints()
        token, token_expires = _get_token_value_expiry(endpoints, payload)
        ctx.logger.info("get_auth_token token3 is {} ".format(token))
        ctx.logger.info("In auth.get_auth_token b4 opening {}".format(constants.path_to_azure_conf))
        with open(constants.path_to_azure_conf, 'r+') as f:
            json_data = json.load(f)
            json_data["auth_token"] = token
            json_data["token_expires"] = token_expires
            f.seek(0)
            f.write(json.dumps(json_data))
            f.close()
    lock.release()
    ctx.logger.info("{} is released".format(lock.path))
    ctx.logger.info("get_auth_token token4 is {} ".format(token))
    return token
Example #43
    def run(self):
        cmdline = sys.argv[1:]  # Grab args from cmdline

        # If we are running in an openshift pod (via `oc new-app`) then
        # there is no cmdline but we want to default to "atomicapp run".
        # In this case copy files to cwd and use the working directory.
        if Utils.running_on_openshift():
            cmdline = 'run -v --dest=none /{}'.format(APP_ENT_PATH).split()

        # We want to be able to place options anywhere on the command
        # line. We have added all global options to each subparser,
        # but subparsers require all options to be after the 'action'
        # keyword. In order to handle this we just need to figure out
        # what subparser will be used and move it's keyword to the front
        # of the line.
        # NOTE: Also allow "mode" to override 'action' if specified
        args, _ = self.parser.parse_known_args(cmdline)
        cmdline.remove(args.action)     # Remove 'action' from the cmdline
        if args.mode:
            args.action = args.mode     # Allow mode to override 'action'
        cmdline.insert(0, args.action)  # Place 'action' at front
        logger.info("Action/Mode Selected is: %s" % args.action)

        # Finally, parse args and give error if necessary
        args = self.parser.parse_args(cmdline)

        # Set logging level
        if args.verbose:
            set_logging(level=logging.DEBUG)
        elif args.quiet:
            set_logging(level=logging.WARNING)
        else:
            set_logging(level=logging.INFO)

        lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE))
        try:
            lock.acquire(timeout=-1)
            args.func(args)
        except AttributeError:
            if hasattr(args, 'func'):
                raise
            else:
                self.parser.print_help()
        except KeyboardInterrupt:
            pass
        except AlreadyLocked:
            logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.")
        except Exception as ex:
            if args.verbose:
                raise
            else:
                logger.error("Exception caught: %s", repr(ex))
                logger.error(
                    "Run the command again with -v option to get more information.")
        finally:
            if lock.i_am_locking():
                lock.release()
Example #44
    def main(username, password):
        # Ignore error, logging set up in logging utils
        from . import logging_utils
        from .navigation import Leifur
        from .config import get_config, set_config, get_config_from_user
        from .connectivity import check_internet_connection
        from .update_checker import check_for_updates

        lock = LockFile('/tmp/spoppy')

        try:
            try:
                # Try for 1s to acquire the lock
                lock.acquire(1)
            except LockTimeout:
                click.echo('Could not acquire lock, is spoppy running?')
                click.echo(
                    'If you\'re sure that spoppy is not running, '
                    'try removing the lock file %s' % lock.lock_file
                )
                click.echo(
                    'You can try removing the lock file by responding [rm]. '
                    'spoppy will exit on all other inputs'
                )
                try:
                    response = raw_input('')
                except NameError:
                    response = input('')
                if response == 'rm':
                    lock.break_lock()
                else:
                    raise TypeError('Could not get lock')
        except TypeError:
            pass
        else:
            check_internet_connection()
            # Check for updates
            check_for_updates(click, get_version(), lock)

            if username and password:
                set_config(username, password)
            else:
                username, password = get_config()
            if not (username and password):
                username, password = get_config_from_user()

            navigator = None
            try:
                navigator = Leifur(username, password)
                navigator.start()
            finally:
                if navigator:
                    navigator.shutdown()
                logger.debug('Finally, bye!')
        finally:
            if lock.i_am_locking():
                lock.release()
Example #45
    def main(username, password):
        from . import logging_utils
        logging_utils.configure_logging()
        from .navigation import Leifur
        from .config import get_config, set_config, get_config_from_user
        from .connectivity import check_internet_connection
        from .update_checker import check_for_updates

        lock = LockFile('/tmp/spoppy')

        try:
            try:
                # Try for 1s to acquire the lock
                lock.acquire(1)
            except LockTimeout:
                click.echo('Could not acquire lock, is spoppy running?')
                click.echo(
                    'If you\'re sure that spoppy is not running, '
                    'try removing the lock file %s' % lock.lock_file
                )
                click.echo(
                    'You can try removing the lock file by responding [rm]. '
                    'spoppy will exit on all other inputs'
                )
                try:
                    response = raw_input('')
                except NameError:
                    response = input('')
                if response == 'rm':
                    lock.break_lock()
                else:
                    raise TypeError('Could not get lock')
        except TypeError:
            pass
        else:
            check_internet_connection()
            # Check for updates
            check_for_updates(click, get_version(), lock)

            if username and password:
                set_config(username, password)
            else:
                username, password = get_config()
            if not (username and password):
                username, password = get_config_from_user()

            navigator = None
            try:
                navigator = Leifur(username, password)
                navigator.start()
            finally:
                if navigator:
                    navigator.shutdown()
                logger.debug('Finally, bye!')
        finally:
            if lock.i_am_locking():
                lock.release()
Example #46
	def on_post(self, req, resp, id):
		try:
			user = req.context['user']

			# Check permissions
			if (not user.is_logged_in()) or (not user.is_org()):
				req.context['result'] = 'Nedostatecna opravneni'
				resp.status = falcon.HTTP_400
				return

			# Check that the task exists
			task = session.query(model.Task).get(id)
			if task is None:
				req.context['result'] = 'Neexistujici uloha'
				resp.status = falcon.HTTP_404
				return

			# Only admins may deploy a published task
			wave = session.query(model.Wave).get(task.wave)
			if (datetime.datetime.utcnow() > wave.time_published) and (not user.is_admin()):
				req.context['result'] = 'Po zverejneni ulohy muze deploy provest pouze administrator'
				resp.status = falcon.HTTP_404
				return

			# Check that the git branch and directory are set in the database
			if (task.git_branch is None) or (task.git_path is None):
				req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
				resp.status = falcon.HTTP_400
				return

			# Check the git lock
			lock = util.lock.git_locked()
			if lock:
				req.context['result'] = 'GIT uzamcen zamkem ' + lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
				resp.status = falcon.HTTP_409
				return

			# The 'deploying' status must be set in this thread
			task.deploy_status = 'deploying'
			session.commit()

			try:
				deployLock = LockFile(util.admin.taskDeploy.LOCKFILE)
				deployLock.acquire(60)  # lock timeout is one minute
				deployThread = threading.Thread(target=util.admin.taskDeploy.deploy, args=(task.id, deployLock, scoped_session(_session)), kwargs={})
				deployThread.start()
			finally:
				deployLock.release()

			resp.status = falcon.HTTP_200
		except SQLAlchemyError:
			session.rollback()
			raise
		finally:
			session.close()
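# Editor's sketch (not part of the original example): the handler above serializes
# deploy requests with a LockFile before starting a background thread. A related
# hand-off variant, in which the worker thread keeps the lock and releases it when
# it finishes, could look like this; WORK_LOCKFILE and do_work are hypothetical.
import threading
from lockfile import LockFile, LockTimeout

WORK_LOCKFILE = '/tmp/deploy.lock'  # hypothetical lock path

def start_background_work():
    work_lock = LockFile(WORK_LOCKFILE)
    try:
        work_lock.acquire(timeout=60)  # give up after one minute
    except LockTimeout:
        return False  # another deploy is already running
    worker = threading.Thread(target=do_work, args=(work_lock,))
    worker.start()
    return True

def do_work(work_lock):
    try:
        pass  # the long-running deploy would go here
    finally:
        work_lock.release()  # the worker owns the lock and releases it when done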
Beispiel #47
0
    def pre_process(self, params):
        """
        Converts the files

        First pass is to create striped tiff using kakadu if available
        and second pass is to convert to tiled tiff.

        A third file path is used for lock, if the lock can be acquired
        and the output is not ready then create it.  If the lock cannot
        be acquired then perhaps other process is processing it.

        TODO: Decide when to declare not possible ?
        """
        # Split the requested filename
        dirname, filename = os.path.split(params["fname"])
        _, ext = os.path.splitext(filename)

        # assert that ext is as expected
        assert ext in [".jp2", ".j2k"]

        output1 = os.path.join(dirname, filename + "_striped.tif")
        output2 = os.path.join(dirname, filename + "_tiled.tif")
        lock_path = os.path.join(dirname, filename + ".lock")

        lock = LockFile(lock_path)
        # logger.info('waiting for lock')
        lock.acquire()
        # If the file is missing then create it
        if not os.path.exists(output2):
            # Make sure the processing lock can be acquired
            logger.info('processing')

            logger.info('# Convert to striped tiff')
            if self.kakadu_dir is None:
                params = ["gdal_translate", params["fname"], output1]
                subprocess.call(params)
            else:
                # Additional LD_LIBRARY_PATH
                environ = os.environ.copy()

                if "LD_LIBRARY_PATH" not in environ:
                    environ["LD_LIBRARY_PATH"] = ""

                environ["LD_LIBRARY_PATH"] = self.kakadu_dir + ":" + environ["LD_LIBRARY_PATH"]
                cmd = [os.path.join(self.kakadu_dir, "kdu_expand"), "-i", params["fname"], "-o", output1]
                subprocess.call(cmd, env=environ)

            logger.info('# Convert to tiled tiff')
            params = ["gdal_translate", "-co", "TILED=YES", "-co", "COMPRESS=JPEG", output1, output2]
            subprocess.call(params)

            # Then remove output1
            os.remove(output1)
        lock.release()
        return output2
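# Editor's sketch (not part of the original example): pre_process above follows a
# "lock, then create the derived file only if it is still missing" pattern. A
# stripped-down version of that idea, with hypothetical names (make_derived,
# build_output), might look like this.
import os
from lockfile import LockFile

def make_derived(src_path, out_path, build_output):
    lock = LockFile(out_path + ".lock")
    lock.acquire()  # wait for any other process working on the same output
    try:
        if not os.path.exists(out_path):
            build_output(src_path, out_path)  # the expensive conversion runs once
    finally:
        lock.release()
    return out_path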
Beispiel #48
0
    def _download_rpm(self, nvr, arch):
        if nvr is None or arch is None:
            raise ValueError("Invalid option passed to connector")

        filename = '%s.%s.rpm' % (nvr, arch)
        file_path = os.path.split(filename)
        if file_path[0] != '':
            raise ValueError("Nvr can not contain path elements")
        if len(arch.split('/')) != 1 or os.path.split(arch)[0] != '':
            raise ValueError("Arch can not contain path elements")

        rpm_file_path = os.path.join(self._rpm_cache, filename)
        if os.path.exists(rpm_file_path):
            return rpm_file_path

        # lock on the destination path so only one downloader fetches this rpm
        lockfile = LockFile(rpm_file_path)
        if lockfile.is_locked():
            # block until the lock is released and then assume other
            # thread was successful
            lockfile.acquire()
            lockfile.release()
            return rpm_file_path

        # acquire the lock and release when done
        lockfile.acquire()
        try:
            info = self.call('getBuild', {'buildInfo': nvr})
            if info is None:
                return {'error': 'No such build (%s)' % filename}

            if not os.path.exists(self._rpm_cache):
                os.mkdir(self._rpm_cache, )

            url = '%s/%s/%s/%s/%s/%s' % (self._koji_pkg_url, info['name'],
                                         info['version'], info['release'],
                                         arch, filename)

            url_file = grabber.urlopen(url, text=filename)
            out = os.open(rpm_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                          0666)
            try:
                while 1:
                    buf = url_file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            except Exception as e:
                raise e
            finally:
                os.close(out)
                url_file.close()
        finally:
            lockfile.release()

        return rpm_file_path
Beispiel #49
0
    def _download_rpm(self, nvr, arch):
        if nvr is None or arch is None:
            raise ValueError("Invalid option passed to connector")

        filename = '%s.%s.rpm' % (nvr, arch)
        file_path = os.path.split(filename)
        if file_path[0] != '':
            raise ValueError("Nvr can not contain path elements")
        if len(arch.split('/')) != 1 or os.path.split(arch)[0] != '':
            raise ValueError("Arch can not contain path elements")

        rpm_file_path = os.path.join(self._rpm_cache, filename)
        if os.path.exists(rpm_file_path):
            return rpm_file_path

        # lock on the destination path so only one downloader fetches this rpm
        lockfile = LockFile(rpm_file_path)
        if lockfile.is_locked():
            # block until the lock is released and then assume other
            # thread was successful
            lockfile.acquire()
            lockfile.release()
            return rpm_file_path

        # acquire the lock and release when done
        lockfile.acquire()
        try:
            info = self.call('getBuild', {'buildInfo': nvr})
            if info is None:
                return {'error': 'No such build (%s)' % filename}

            if not os.path.exists(self._rpm_cache):
                os.mkdir(self._rpm_cache,)

            url = '%s/%s/%s/%s/%s/%s' % (
                self._koji_pkg_url, info['name'], info['version'],
                info['release'], arch, filename)

            url_file = grabber.urlopen(url, text=filename)
            out = os.open(
                rpm_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0666)
            try:
                while 1:
                    buf = url_file.read(4096)
                    if not buf:
                        break
                    os.write(out, buf)
            except Exception as e:
                raise e
            finally:
                os.close(out)
                url_file.close()
        finally:
            lockfile.release()

        return rpm_file_path
Beispiel #50
0
def loadTasks(filename):
	lock = LockFile(LOCK_FILE)
	lock.acquire()
	try:
		with open(filename) as f:
			content = f.read()
		if len(content.strip()) == 0:
			ret = []
		else:
			ret = json.loads(content)
	finally:
		# release even if the file is unreadable or contains invalid JSON
		lock.release()
	return ret
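# Editor's sketch (not part of the original example): loadTasks above reads the task
# list under LOCK_FILE. The source does not show the matching writer; a hypothetical
# saveTasks using the same lock might look like this.
def saveTasks(filename, tasks):
	lock = LockFile(LOCK_FILE)
	lock.acquire()
	try:
		with open(filename, 'w') as f:
			f.write(json.dumps(tasks))
	finally:
		lock.release()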
Beispiel #51
0
class Lock():
    """Simple implementation of a mutex lock using the file systems. Works on
    *nix systems."""

    path = None
    lock = None

    def __init__(self, path):
        try:
            from lockfile import LockFile
        except ImportError:
            from lockfile import FileLock
            # Different naming in older versions of lockfile
            LockFile = FileLock

        self.path = path
        self.lock = LockFile(path)

    def obtain(self):
        import os
        import logging
        from lockfile import AlreadyLocked
        logger = logging.getLogger()

        try:
            self.lock.acquire(0)
            logger.debug("Successfully obtained lock: %s" % self.path)
        except AlreadyLocked:
            return False

        return True

    def release(self):
        import os
        import logging
        logger = logging.getLogger()

        if not self.has_lock():
            raise Exception(
                "Unable to release lock that is owned by another process")

        self.lock.release()
        logger.debug("Successfully released lock: %s" % self.path)

    def has_lock(self):
        return self.lock.i_am_locking()

    def clear(self):
        import os
        import logging
        logger = logging.getLogger()

        self.lock.break_lock()
        logger.debug("Successfully cleared lock: %s" % self.path)
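# Editor's sketch (not part of the original example): minimal usage of the Lock
# class above, assuming a writable path such as /tmp/mytask.
lock = Lock('/tmp/mytask')
if lock.obtain():
    try:
        pass  # critical section goes here
    finally:
        lock.release()
else:
    print("Another process holds the lock")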
Beispiel #52
0
class State(object):

    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except ValueError:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)

        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()

        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.PlatformioException(
                "The directory `{0}` or its parent directory is not owned by "
                "the current user and PlatformIO can not store configuration "
                "data. \nPlease check the permissions and owner of that "
                "directory. Otherwise, please remove manually `{0}` "
                "directory and PlatformIO will create new from the current "
                "user.".format(dirname(self.path)))

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
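# Editor's sketch (not part of the original example): State above is a context
# manager; typical usage, assuming the surrounding PlatformIO helpers (util,
# exception, __version__) are importable, would be:
with State(lock=True) as state:
    state["last_check"] = 12345  # any mutation is written back on __exit__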
Beispiel #53
0
class Lock():
    """Simple implementation of a mutex lock using the file systems. Works on
    *nix systems."""

    path = None
    lock = None

    def __init__(self, path):
        try:
            from lockfile import LockFile
        except ImportError:
            from lockfile import FileLock
            # Different naming in older versions of lockfile
            LockFile = FileLock

        self.path = path
        self.lock = LockFile(path)

    def obtain(self):
        import os
        import logging
        from lockfile import AlreadyLocked
        logger = logging.getLogger()

        try:
            self.lock.acquire(0)
            logger.debug("Successfully obtained lock: %s" % self.path)
        except AlreadyLocked:
            return False

        return True

    def release(self):
        import os
        import logging
        logger = logging.getLogger()

        if not self.has_lock():
            raise Exception("Unable to release lock that is owned by another process")

        self.lock.release()
        logger.debug("Successfully released lock: %s" % self.path)

    def has_lock(self):
        return self.lock.i_am_locking()

    def clear(self):
        import os
        import logging
        logger = logging.getLogger()

        self.lock.break_lock()
        logger.debug("Successfully cleared lock: %s" % self.path)
Beispiel #54
0
    def renumber_dsn(self, wdmpath, odsn, ndsn):
        """Will renumber the odsn to the ndsn."""
        odsn = int(odsn)
        ndsn = int(ndsn)

        lock = LockFile(wdmpath)
        lock.acquire()
        wdmfp = self._open(wdmpath, 51)
        retcode = self.wddsrn(wdmfp, odsn, ndsn)
        lock.release()
        self._close(wdmpath)
        self._retcode_check(retcode, additional_info='wddsrn')
Beispiel #55
0
class State(object):

    def __init__(self, path=None, lock=False):
        self.path = path
        self.lock = lock
        if not self.path:
            self.path = join(util.get_home_dir(), "appstate.json")
        self._state = {}
        self._prev_state = {}
        self._lockfile = None

    def __enter__(self):
        try:
            self._lock_state_file()
            if isfile(self.path):
                self._state = util.load_json(self.path)
        except exception.PlatformioException:
            self._state = {}
        self._prev_state = deepcopy(self._state)
        return self._state

    def __exit__(self, type_, value, traceback):
        if self._prev_state != self._state:
            with open(self.path, "w") as fp:
                if "dev" in __version__:
                    json.dump(self._state, fp, indent=4)
                else:
                    json.dump(self._state, fp)
        self._unlock_state_file()

    def _lock_state_file(self):
        if not self.lock:
            return
        self._lockfile = LockFile(self.path)

        if self._lockfile.is_locked() and \
                (time() - getmtime(self._lockfile.lock_file)) > 10:
            self._lockfile.break_lock()

        try:
            self._lockfile.acquire()
        except LockFailed:
            raise exception.PlatformioException(
                "The directory `{0}` or its parent directory is not owned by "
                "the current user and PlatformIO can not store configuration "
                "data. \nPlease check the permissions and owner of that "
                "directory. Otherwise, please remove manually `{0}` "
                "directory and PlatformIO will create new from the current "
                "user.".format(dirname(self.path)))

    def _unlock_state_file(self):
        if self._lockfile:
            self._lockfile.release()
Beispiel #56
0
        def do_lock(*args, **kwargs):
            lock = LockFile(path)
            while not lock.i_am_locking():
                try:
                    lock.acquire(timeout=5)
                except LockTimeout:
                    lock.break_lock()

            try:
                rv = f(*args, **kwargs)
            finally:
                lock.release()
            return rv
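# Editor's sketch (not part of the original example): do_lock above reads like the
# inner wrapper of a decorator; a hypothetical completion supplying the missing
# outer layers might look like this.
import functools
from lockfile import LockFile, LockTimeout

def locked(path):
    def decorator(f):
        @functools.wraps(f)
        def do_lock(*args, **kwargs):
            lock = LockFile(path)
            while not lock.i_am_locking():
                try:
                    lock.acquire(timeout=5)
                except LockTimeout:
                    lock.break_lock()
            try:
                rv = f(*args, **kwargs)
            finally:
                lock.release()
            return rv
        return do_lock
    return decorator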