Example No. 1
File: db.py Project: fparrel/regepe
def DbBuildInvertOld(ele,invfunc):
    raise Exception('Deprecated')
    # Check ele
    if ele not in ELELIST['maps']:
        raise Exception('Invalid element')
    # Target inv db
    dbfileinv = ele.upper()+'_INV.db'
    # Lock and open inv db
    lock = FileLock(dbfileinv,5)
    lock.acquire()
    dbinv = anydbm.open(dbfileinv,'c')
    # Clear inv db
    dbinv.clear()
    # List dir
    for mapdbfile in os.listdir('maps'):
        mapid = mapdbfile[:-3]
        dbmap = anydbm.open('maps/%s' % mapdbfile,'r')
        if dbmap.has_key(ele):
            value = dbmap[ele]
            for word in invfunc(value):
                if dbinv.has_key(word):
                    dbinv[word] = dbinv[word] + (',%s' % mapid)
                else:
                    dbinv[word] = '%s' % mapid
        dbmap.close()
    dbinv.close()
    lock.release()
    # Rebuild is no longer needed
    RearmRebuild(ele)
Example No. 2
File: db.py Project: fparrel/regepe
def DbBuildInvert(dbtype,ele,invfunc):
    if dbtype not in DBTYPES:
        raise Exception('Invalid database type')
    # Check ele
    if ele not in ELELIST[dbtype]:
        raise Exception('Invalid element')
    #print '<!-- DbBuildInvert -->\n'
    # Target inv db
    dbfileinv = ele.upper()+'_INV.db'
    # Lock and open inv db
    lock = FileLock(dbfileinv,5)
    lock.acquire()
    #Log('DbBuildInvert open db c %s\n'%dbfileinv)
    dbinv = anydbm.open(dbfileinv,'c')
    # Clear inv db
    dbinv.clear()
    # List dir
    for dbfile in os.listdir(dbtype):
        id = dbfile[:-3]
        #Log('DbBuildInvert open db r %s/%s\n'%(dbtype,dbfile))
        db = anydbm.open('%s/%s' % (dbtype,dbfile),'r')
        if db.has_key(ele):
            value = db[ele]
            for word in invfunc(value):
                if dbinv.has_key(word):
                    dbinv[word] = dbinv[word] + (',%s' % id)
                else:
                    dbinv[word] = '%s' % id
        db.close()
        #Log('DbBuildInvert close db r %s/%s\n'%(dbtype,dbfile))
    dbinv.close()
    #Log('DbBuildInvert close db c %s\n'%dbfileinv)
    lock.release()
    # Rebuild is no longer needed
    RearmRebuild(ele)
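
Note: in both variants above the lock is acquired and released by hand, so an exception raised anywhere between acquire() and release() (for instance inside invfunc) would leave the *_INV.db lock held. A minimal sketch of the same guard with guaranteed release, assuming a FileLock that supports the context-manager protocol (the pip `filelock` package does; the project may ship its own class), with rebuild_under_lock as an illustrative name rather than project code:

from filelock import FileLock  # assumption: pip `filelock`; the examples above may use a custom FileLock

def rebuild_under_lock(dbfileinv, rebuild):
    # The lock is released when the `with` block exits, even if rebuild() raises.
    lock = FileLock(dbfileinv + '.lock', timeout=5)
    with lock:
        rebuild()

Here rebuild() would wrap the anydbm rebuild loop shown above.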
Example No. 3
File: db.py Project: fparrel/regepe
def DbAddComment(mapid,user,comment):
    mapfile = 'maps/%s.db' % mapid
    if not os.access(mapfile,os.F_OK):
        raise Exception('Invalid map id %s' % mapid)
    d = getCurrentDate()
    lock = FileLock(mapfile,5)
    lock.acquire()
    #Log('DbAddComment open db r %s\n' % mapfile)
    db = anydbm.open(mapfile,'r')
    if db.has_key('last_comment_id'):
        last_comment_id = int(db['last_comment_id'])
    else:
        last_comment_id = 0
    db.close()
    #Log('DbAddComment close db r %s\n' % mapfile)
    last_comment_id += 1
    if last_comment_id>99999:
        lock.release()
        raise Exception('Max comments reached')
    #Log('DbAddComment open db c %s\n' % mapfile)
    db = anydbm.open(mapfile,'c')
    db['last_comment_id'] = str(last_comment_id)
    db['comment%.5d'%last_comment_id] = '%s,%s,%s' % (d,user,comment)
    db.close()
    #Log('DbAddComment close db c %s\n' % mapfile)
    lock.release()
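
Note: the explicit lock.release() before the 'Max comments reached' exception is easy to miss on other error paths. A sketch of the same read-modify-write with the lock scoped by a with statement, using Python 3's dbm in place of anydbm; add_comment_under_lock and record are illustrative names, not the project's API:

import dbm
from filelock import FileLock  # assumption: pip `filelock`

def add_comment_under_lock(mapfile, record):
    # The whole read-increment-write sequence runs under the lock, and the lock
    # is released automatically, including when the max-comments error is raised.
    lock = FileLock(mapfile + '.lock', timeout=5)
    with lock:
        with dbm.open(mapfile, 'c') as db:
            last_id = int(db['last_comment_id']) if 'last_comment_id' in db else 0
            last_id += 1
            if last_id > 99999:
                raise Exception('Max comments reached')
            db['last_comment_id'] = str(last_id)
            db['comment%.5d' % last_id] = record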
Example No. 4
        def run(self):
                while True:
                        lock = FileLock("/var/lock/baseDaemon.lock")

                        #Just to be safe; pulling data shouldn't take more than 2h
                        lock.acquire()

                        wikiDatesToMongo(False)
                        time.sleep(2*60*60)

                        lock.release()
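
Note: the daemon above keeps the lock held through the two-hour sleep. If the intent is only to guard the data pull itself, a sketch that scopes the lock to the pull (pull_data stands in for wikiDatesToMongo; assuming the pip `filelock` package):

import time
from filelock import FileLock  # assumption: pip `filelock`

def run_forever(pull_data, interval=2 * 60 * 60, lock_path="/var/lock/baseDaemon.lock"):
    while True:
        # Hold the lock only while pulling data, not during the sleep,
        # so other processes are not blocked for the whole interval.
        with FileLock(lock_path):
            pull_data()
        time.sleep(interval)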
Example No. 5
 def update(self, lock=True):
     if lock:
         flock = FileLock(self.inifile)
         flock.acquire()
     
     try:
         inifp = open(self.inifile, 'w')
         self.cfg.write(inifp)
         inifp.close()
         if lock: flock.release()
         return True
     except:
         if lock: flock.release()
         return False
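
Note: the duplicated `if lock: flock.release()` in both branches can be collapsed with a context manager; contextlib.nullcontext covers the unlocked path. A sketch under the assumptions that self.inifile and self.cfg behave as above, that a separate .lock file is acceptable, and that narrowing the bare except to OSError is acceptable:

import contextlib
from filelock import FileLock  # assumption: pip `filelock`; the original may use another FileLock class

def update(self, lock=True):
    guard = FileLock(self.inifile + '.lock') if lock else contextlib.nullcontext()
    try:
        # One `with` covers both the locked and unlocked paths; no duplicated release.
        with guard, open(self.inifile, 'w') as inifp:
            self.cfg.write(inifp)
        return True
    except OSError:
        return False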
Example No. 6
 def runjobs(self, verbose = None):
     """ Run the jobs """
     # The verbose flag
     if verbose is not None:
         self.verbose = verbose
     # Find all 'job_' methods
     for (exc_name, exc_value) in inspect.getmembers(self, lambda x: inspect.ismethod(x)):
         # The method's name must start with uppercase
         if exc_name[0].isupper():
             lock = FileLock(os.path.join('/', 'tmp', 'TranPy-%s-%s.lock' % (self.job_class, exc_name)))
             # Try to get the lock
             if lock.acquire():
                 if self.verbose:
                     print >>sys.stderr, 'Running %s %s' % (self.job_class, exc_name)
                 # Run the job
                 try:
                     exc_value()
                 except:
                     if self.verbose:
                         traceback.print_exc(file = sys.stderr)
                 finally:
                     # Release the lock
                     lock.release()
             else:
                 if self.verbose:
                     print >>sys.stderr, 'Locked %s %s' % (self.job_class, exc_name)
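
Note: here lock.acquire() is used as a boolean, which implies a non-blocking FileLock that returns True/False rather than the pip `filelock` package, whose acquire() raises Timeout instead. The equivalent try-lock pattern with pip filelock, as a sketch (try_run and job are illustrative names):

from filelock import FileLock, Timeout  # pip `filelock`

def try_run(lock_path, job):
    lock = FileLock(lock_path)
    try:
        lock.acquire(timeout=0)   # raises Timeout at once if another process holds the lock
    except Timeout:
        return False              # skip the job, mirroring the 'Locked ...' branch above
    try:
        job()
    finally:
        lock.release()
    return True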
Example No. 7
    def __init__(self,
                 i,
                 model_cls,
                 batch_size,
                 all_reduce_alg=None,
                 num_devices=1,
                 use_cpus=False,
                 max_bytes=0,
                 use_xray=True,
                 plasma_op=False,
                 verbose=False):
        # TODO - just port VariableMgrLocalReplicated
        if use_xray:
            if num_devices == 4:
                gpu0 = FileLock("/tmp/gpu0")
                gpu1 = FileLock("/tmp/gpu1")
                try:
                    gpu0.acquire(timeout=0)
                    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
                except:
                    gpu1.acquire(timeout=0)
                    os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
            else:
                os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
                print("CUDA VISIBLES", os.environ["CUDA_VISIBLE_DEVICES"])
        self.i = i
        assert num_devices > 0
        tf_session_args = {
            "device_count": {
                "CPU": num_devices
            },
            "log_device_placement": False,
            "gpu_options": tf.GPUOptions(force_gpu_compatible=True),
            "inter_op_parallelism_threads": 128,
        }
        config_proto = tf.ConfigProto(**tf_session_args)
        self.sess = tf.Session(config=config_proto)
        models = []
        grad_ops = []
        self.iter = 0

        if use_cpus:
            device_tmpl = "/cpu:%d"
        else:
            device_tmpl = "/gpu:%d"
        for device_idx in range(num_devices):
            device = device_tmpl % device_idx
            with tf.device(device):
                with tf.variable_scope("device_%d" % device_idx):
                    print("DEVICE: ", device)
                    model = model_cls(batch=batch_size,
                                      use_cpus=use_cpus,
                                      device=device)
                    models += [model]
                    model.grads = [
                        t
                        for t in model.optimizer.compute_gradients(model.loss)
                        if t[0] is not None
                    ]
                    grad_ops.append(model.grads)

        self.models = models
        if num_devices == 1:
            assert not max_bytes, "Not supported with 1 GPU"
            self.packed_grads_and_vars = grad_ops
        elif all_reduce_alg:
            if max_bytes:
                from tfbench import modified_allreduce
                self.packed_grads_and_vars, packing_vals = modified_allreduce.sum_gradients_all_reduce(
                    "",
                    grad_ops,
                    1,
                    all_reduce_alg,
                    1,
                    list(range(num_devices)),
                    agg_small_grads_max_bytes=max_bytes)
            else:
                self.packed_grads_and_vars = allreduce.sum_gradients_all_reduce(
                    "", grad_ops, 1, all_reduce_alg, 1,
                    list(range(num_devices)))
        self.per_device_grads = [
            list(zip(*dev_gv))[0] for dev_gv in self.packed_grads_and_vars
        ]
        assert (len(self.per_device_grads) == num_devices)
        self.num_grads = num_grads = len(self.packed_grads_and_vars[0])
        if max_bytes:
            assert (num_grads < 314)
            print("Packed grads => {} tensors".format(num_grads))
        else:
            assert (num_grads == 314)

        # Ops for reading grads with the right control deps
        nccl_noops = []
        for j in range(num_grads)[::-1]:
            with tf.control_dependencies(
                    nccl_noops +
                [dev_grad[j] for dev_grad in self.per_device_grads]):
                nccl_noops = [tf.no_op()]

        # You must fetch this otherwise the NCCL allreduce will hang
        self.nccl_control_out = tf.group(*nccl_noops)

        round_robin_devices = False
        if args.plasma_op:
            memcpy_plasma_module = tf.load_op_library(
                "/home/ubuntu/osdi2018/ops/memcpy_plasma_op.so")

            # For fetching grads -> plasma
            self.plasma_in_grads = []
            self.plasma_in_grads_oids = [
                tf.placeholder(shape=[], dtype=tf.string)
                for _ in range(num_grads)
            ]
            ix = 0
            for j in range(num_grads):
                grad = self.per_device_grads[ix][j]
                if round_robin_devices:
                    ix += 1  # round robin assignment
                ix %= num_devices
                with tf.device(self.models[ix].device):
                    plasma_grad = memcpy_plasma_module.tensor_to_plasma(
                        [grad],
                        self.plasma_in_grads_oids[j],
                        plasma_store_socket_name=ray.worker.global_worker.
                        plasma_client.store_socket_name,
                        plasma_manager_socket_name=ray.worker.global_worker.
                        plasma_client.manager_socket_name)
                self.plasma_in_grads.append(plasma_grad)

            # For applying grads <- plasma
            unpacked_gv = []
            self.plasma_out_grads_oids = [
                tf.placeholder(shape=[], dtype=tf.string)
                for _ in range(num_grads)
            ]
            packed_plasma_grads = []
            ix = 0
            for j in range(num_grads):
                with tf.device(self.plasma_in_grads[j].device):
                    with tf.control_dependencies([self.plasma_in_grads[j]]):
                        grad_ph = memcpy_plasma_module.plasma_to_tensor(
                            self.plasma_out_grads_oids[j],
                            plasma_store_socket_name=ray.worker.global_worker.
                            plasma_client.store_socket_name,
                            plasma_manager_socket_name=ray.worker.
                            global_worker.plasma_client.manager_socket_name)
                grad_ph = tf.reshape(grad_ph,
                                     self.packed_grads_and_vars[0][j][0].shape)
                print("Packed tensor", grad_ph)
                packed_plasma_grads.append(grad_ph)
            for i in range(num_devices):
                per_device = []
                for j, (g, v) in enumerate(self.packed_grads_and_vars[i]):
                    grad_ph = packed_plasma_grads[j]
                    per_device.append((grad_ph, v))
                unpacked_gv.append(per_device)

            if max_bytes:
                unpacked_gv = allreduce.unpack_small_tensors(
                    unpacked_gv, packing_vals)

        elif max_bytes:
            unpacked_gv = allreduce.unpack_small_tensors(
                self.packed_grads_and_vars, packing_vals)
        else:
            unpacked_gv = self.packed_grads_and_vars

        # Same shape as packed_grads_and_vars
        assert len(unpacked_gv) == num_devices
        assert len(unpacked_gv[0]) == 314
        assert len(unpacked_gv[0][0]) == 2

        apply_ops = []
        to_apply = unpacked_gv[0]
        for ix, m in enumerate(models):
            apply_ops.append(
                m.optimizer.apply_gradients([
                    (g, v)
                    for ((g, _), (_, v)) in zip(to_apply, unpacked_gv[ix])
                ]))
        self.apply_op = tf.group(*apply_ops)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        self.sess.run(init_op)
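
Note: the GPU-selection block claims /tmp/gpu0 or /tmp/gpu1 with bare except clauses, so an unrelated error would silently fall through to the second group, and the second acquire can raise out of __init__ if both locks are taken. The same idea as a small helper that tries each lock in turn (claim_gpu_group is an illustrative name; assuming the pip `filelock` package):

import os
from filelock import FileLock, Timeout  # pip `filelock`

def claim_gpu_group(groups):
    # groups: e.g. [("/tmp/gpu0", "0,1,2,3"), ("/tmp/gpu1", "4,5,6,7")]
    for lock_path, devices in groups:
        lock = FileLock(lock_path)
        try:
            lock.acquire(timeout=0)
        except Timeout:
            continue                       # this group is taken, try the next one
        os.environ["CUDA_VISIBLE_DEVICES"] = devices
        return lock                        # keep the returned lock alive to hold the claim
    raise RuntimeError("no free GPU group")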
Example No. 8
def main():
    ret = SUCCESS_EXITVAL

    parser = argparse.ArgumentParser(description='project mirroring',
                                     parents=[get_base_parser(
                                         tool_version=__version__)
                                     ])

    parser.add_argument('project', nargs='*', default=None)
    parser.add_argument('-a', '--all', action='store_true',
                        help='mirror all indexed projects', default=False)
    parser.add_argument('-c', '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-B', '--backupcount', default=8,
                        help='how many log files to keep around in batch mode')
    parser.add_argument('-I', '--check-changes', action='store_true',
                        help='Check for changes in the project or its'
                             ' repositories,'
                             ' terminate the processing'
                             ' if no change is found.')
    parser.add_argument('-w', '--workers', default=cpu_count(),
                        help='Number of worker processes')

    try:
        args = parser.parse_args()
    except ValueError as e:
        return fatal(e, False)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    if len(args.project) > 0 and args.all:
        return fatal("Cannot use both project list and -a/--all", False)

    if not args.all and len(args.project) == 0:
        return fatal("Need at least one project or --all", False)

    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            return fatal("Cannot read config file from {}".
                         format(args.config), False)
    else:
        config = {}

    uri = args.uri
    if not is_web_uri(uri):
        return fatal("Not a URI: {}".format(uri), False)
    logger.debug("web application URI = {}".format(uri))

    if not check_configuration(config):
        return 1

    # Save the source root to avoid querying the web application.
    source_root = get_config_value(logger, 'sourceRoot', uri)
    if not source_root:
        return 1

    logger.debug("Source root = {}".format(source_root))

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    logdir = None
    # Log messages to dedicated log file if running in batch mode.
    if args.batch:
        logdir = config.get(LOGDIR_PROPERTY)
        if not logdir:
            return fatal("The {} property is required in batch mode".
                         format(LOGDIR_PROPERTY), False)

    projects = args.project
    if len(projects) == 1:
        lockfile = projects[0] + "-mirror"
    else:
        lockfile = os.path.basename(sys.argv[0])

    if args.all:
        projects = list_indexed_projects(logger, args.uri)

    lock = FileLock(os.path.join(tempfile.gettempdir(), lockfile + ".lock"))
    try:
        with lock.acquire(timeout=0):
            with Pool(processes=int(args.workers)) as pool:
                worker_args = []
                for x in projects:
                    worker_args.append([x, logdir, args.loglevel,
                                        args.backupcount, config,
                                        args.check_changes,
                                        args.uri, source_root,
                                        args.batch])
                try:
                    project_results = pool.map(worker, worker_args, 1)
                except KeyboardInterrupt:
                    return FAILURE_EXITVAL
                else:
                    if any([x == FAILURE_EXITVAL for x in project_results]):
                        ret = FAILURE_EXITVAL
                    if all([x == CONTINUE_EXITVAL for x in project_results]):
                        ret = CONTINUE_EXITVAL
    except Timeout:
        logger.warning("Already running, exiting.")
        return FAILURE_EXITVAL

    logging.shutdown()
    return ret
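
Note: the `with lock.acquire(timeout=0)` plus `except Timeout` pair is the usual single-instance guard with the pip `filelock` package. Reduced to its essentials as a sketch (do_work is a placeholder, not part of the mirroring script):

import os
import sys
import tempfile
from filelock import FileLock, Timeout

def run_single_instance(do_work):
    lock_path = os.path.join(tempfile.gettempdir(),
                             os.path.basename(sys.argv[0]) + ".lock")
    try:
        with FileLock(lock_path).acquire(timeout=0):  # fail fast if another copy is running
            return do_work()
    except Timeout:
        print("Already running, exiting.", file=sys.stderr)
        return 1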
Example No. 9
class EngineService:
    """Service that provides methods for interacting with BAS engine."""

    _exe_dir: str = None
    """The path to the directory in which the executable file of the engine is located."""

    _zip_dir: str = None
    """The path to the directory in which the archive file of the engine is located."""
    def __init__(self, client):
        """Create an instance of EngineService class."""
        script_name = client.options.script_name
        working_dir = client.options.working_dir
        self._loop = client.loop

        self._script_dir = path.join(working_dir, "run", script_name)
        self._engine_dir = path.join(working_dir, "engine")
        self._script_name = script_name

        self._process = None
        self._lock = None

    async def start(self, port: int) -> None:
        """Asynchronously start the engine service with the specified port.

        Arguments:
            port (int):
                Selected port number.
        """
        arch = 64 if machine().endswith('64') else 32
        zip_name = f'FastExecuteScriptProtected.x{arch}'
        url_name = f'FastExecuteScriptProtected{arch}'

        zip_path = path.join(self._zip_dir, f'{zip_name}.zip')

        if not path.exists(self._zip_dir):
            makedirs(self._zip_dir)
            await self._download_executable(zip_path, zip_name, url_name)

        if not path.exists(self._exe_dir):
            makedirs(self._exe_dir)
            await self._extract_executable(zip_path)

        self._start_engine_process(port)
        self._clear_run_directory()

    async def initialize(self):
        url = f'{ENDPOINT}/scripts/{self._script_name}/properties'

        async with ClientSession(loop=self._loop) as session:
            async with session.get(url) as response:
                script = Script(await response.json())

        if not script.is_exist:
            raise ScriptNotExistError()

        if not script.is_supported:
            raise ScriptNotSupportedError()

        self._zip_dir = path.join(self._engine_dir, script.engine_version)
        self._exe_dir = path.join(self._script_dir, script.hash[0:5])

    async def _download_executable(self, zip_path: str, zip_name: str,
                                   url_name: str) -> None:
        url = f"{ENDPOINT}/distr/{url_name}/{path.basename(self._zip_dir)}/{zip_name}.zip"

        async with ClientSession(loop=self._loop) as session:
            async with session.get(url) as response:
                async with open(zip_path, 'wb') as file:
                    while True:
                        chunk = await response.content.read(1024 * 16)
                        if not chunk:
                            break
                        await file.write(chunk)
                    return await response.release()

    async def _extract_executable(self, zip_path: str) -> None:
        with ZipFile(zip_path, 'r') as file:

            async def task(name, zip_file: ZipFile):
                zip_file.extract(name, self._exe_dir, None)

            tasks = [task(name, file) for name in file.namelist()]
            await asyncio.wait(tasks, loop=self._loop)

    def _start_engine_process(self, port: int) -> None:
        self._process = subprocess.Popen([
            path.join(self._exe_dir, 'FastExecuteScript.exe'),
            f'--remote-control-port={port}', f'--remote-control'
        ],
                                         cwd=self._exe_dir)

        lock = self._get_lock_path()
        self._lock = FileLock(lock)
        self._lock.acquire()

    def _clear_run_directory(self) -> None:
        for dir_path in [
                name for name in listdir(self._script_dir) if path.isfile(name)
        ]:
            dir_path = path.join(self._script_dir, dir_path)
            lock_path = self._get_lock_path(dir_path)
            if not is_locked(lock_path):
                rmtree(dir_path)

    def _get_lock_path(self, dir_path=None) -> str:
        return path.join(dir_path or self._exe_dir, '.lock')

    async def close(self) -> None:
        """Close the engine service."""
        self._process.kill()
        self._lock.release()
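
Note: the excerpt relies on an is_locked() helper that is not shown. With the pip `filelock` package, the FileLock.is_locked property only reflects the current process's own acquisition count, so a cross-process check has to attempt a zero-timeout acquire; a best-effort sketch:

from filelock import FileLock, Timeout  # pip `filelock`

def is_locked(lock_path: str) -> bool:
    # True if another process currently holds lock_path. When the lock is free
    # this briefly acquires and releases it, so it is only suitable for
    # advisory decisions like the run-directory cleanup above.
    lock = FileLock(lock_path)
    try:
        lock.acquire(timeout=0)
    except Timeout:
        return True
    lock.release()
    return False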
Example No. 10
    def read(
        self, file_path: Union[Path, str]
    ) -> Union[AllennlpDataset, AllennlpLazyDataset]:
        """
        Returns a dataset containing all the instances that can be read from the file path.

        If `self.lazy` is `False`, this eagerly reads all instances from `self._read()`
        and returns an `AllennlpDataset`.

        If `self.lazy` is `True`, this returns an `AllennlpLazyDataset`, which internally
        relies on the generator created from `self._read()` to lazily produce `Instance`s.
        In this case your implementation of `_read()` must also be lazy
        (that is, not load all instances into memory at once), otherwise
        you will get a `ConfigurationError`.

        In either case, the returned `Iterable` can be iterated
        over multiple times. It's unlikely you want to override this function,
        but if you do your result should likewise be repeatedly iterable.
        """
        if not isinstance(file_path, str):
            file_path = str(file_path)

        lazy = getattr(self, "lazy", None)

        if lazy is None:
            warnings.warn(
                "DatasetReader.lazy is not set, "
                "did you forget to call the superclass constructor?",
                UserWarning,
            )

        if lazy:
            return AllennlpLazyDataset(self._instance_iterator, file_path)
        else:
            cache_file: Optional[str] = None
            if self._cache_directory:
                cache_file = self._get_cache_location_for_file_path(file_path)

            if cache_file is not None and os.path.exists(cache_file):
                try:
                    # Try to acquire a lock just to make sure another process isn't in the middle
                    # of writing to the cache.
                    cache_file_lock = FileLock(
                        cache_file + ".lock",
                        timeout=self.CACHE_FILE_LOCK_TIMEOUT)
                    cache_file_lock.acquire()
                    # We make an assumption here that if we can obtain the lock, no one will
                    # be trying to write to the file anymore, so it should be safe to release the lock
                    # before reading so that other processes can also read from it.
                    cache_file_lock.release()
                    logger.info("Reading instances from cache %s", cache_file)
                    instances = self._instances_from_cache_file(cache_file)
                except Timeout:
                    logger.warning(
                        "Failed to acquire lock on dataset cache file within %d seconds. "
                        "Cannot use cache to read instances.",
                        self.CACHE_FILE_LOCK_TIMEOUT,
                    )
                    instances = self._multi_worker_islice(
                        self._read(file_path))
            else:
                instances = self._multi_worker_islice(self._read(file_path))

            # Then some validation.
            if not isinstance(instances, list):
                instances = list(instances)

            if not instances:
                raise ConfigurationError(
                    "No instances were read from the given filepath {}. "
                    "Is the path correct?".format(file_path))

            # And finally we try writing to the cache.
            if cache_file is not None and not os.path.exists(cache_file):
                if self.max_instances is not None:
                    # But we don't write to the cache when max_instances is specified.
                    logger.warning(
                        "Skipping writing to data cache since max_instances was specified."
                    )
                elif util.is_distributed() or (get_worker_info() and
                                               get_worker_info().num_workers):
                    # We also shouldn't write to the cache if there's more than one process loading
                    # instances since each worker only receives a partial share of the instances.
                    logger.warning(
                        "Can't cache data instances when there are multiple processes loading data"
                    )
                else:
                    try:
                        with FileLock(cache_file + ".lock",
                                      timeout=self.CACHE_FILE_LOCK_TIMEOUT):
                            self._instances_to_cache_file(
                                cache_file, instances)
                    except Timeout:
                        logger.warning(
                            "Failed to acquire lock on dataset cache file within %d seconds. "
                            "Cannot write to cache.",
                            self.CACHE_FILE_LOCK_TIMEOUT,
                        )

            return AllennlpDataset(instances)
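
Note: the read side above takes the cache lock only to wait out an in-progress writer and releases it before reading, while the write side holds the lock for the whole write and gives up on Timeout. The two patterns reduced to a sketch (read_fn, write_fn, and fallback_fn are placeholders; the timeout value is illustrative):

from filelock import FileLock, Timeout

CACHE_FILE_LOCK_TIMEOUT = 10  # illustrative value

def read_cached(cache_file, read_fn, fallback_fn):
    try:
        lock = FileLock(cache_file + ".lock", timeout=CACHE_FILE_LOCK_TIMEOUT)
        lock.acquire()
        lock.release()            # safe to read without the lock once no writer is active
        return read_fn(cache_file)
    except Timeout:
        return fallback_fn()

def write_cached(cache_file, data, write_fn):
    try:
        with FileLock(cache_file + ".lock", timeout=CACHE_FILE_LOCK_TIMEOUT):
            write_fn(cache_file, data)
    except Timeout:
        pass                      # skip caching rather than block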
Example No. 11
def test_actor_writer_2(workflow_start_regular, tmp_path):
    g_lock = str(Path(tmp_path / "g.lock"))
    incr_lock = str(Path(tmp_path / "incr.lock"))
    val_lock = str(Path(tmp_path / "val.lock"))

    val_err = str(Path(tmp_path / "val.err"))
    incr_err = str(Path(tmp_path / "incr.err"))

    @workflow.virtual_actor
    class SyncCounter:
        def __init__(self, val_lock: str, incr_lock: str, g_lock: str,
                     val_err: str, incr_err: str):
            self.val_lock = val_lock
            self.incr_lock = incr_lock
            self.g_lock = g_lock

            self.val_err = val_err
            self.incr_err = incr_err
            self.v = 0
            if Path(self.val_err).exists():
                raise ValueError()

        @workflow.virtual_actor.readonly
        def val(self):
            with FileLock(self.val_lock), FileLock(self.g_lock):
                if Path(self.val_err).exists():
                    raise ValueError()
                return self.v

        def incr(self, create_incr_err=False):
            with FileLock(self.incr_lock), FileLock(self.g_lock):
                if Path(self.incr_err).exists():
                    raise ValueError()
                if create_incr_err:
                    Path(incr_err).touch()
                self.v += 1
                return self.v

        def __getstate__(self):
            return (self.v, self.val_lock, self.incr_lock, self.g_lock,
                    self.val_err, self.incr_err)

        def __setstate__(self, state):
            (self.v, self.val_lock, self.incr_lock, self.g_lock, self.val_err,
             self.incr_err) = state

    # trigger error in init
    Path(val_err).touch()
    actor = SyncCounter.get_or_create("sync_counter", val_lock, incr_lock,
                                      g_lock, val_err, incr_err)
    with pytest.raises(Exception):
        actor.incr.run()
    Path(val_err).unlink()

    assert ray.get([actor.incr.run_async()
                    for _ in range(9)]) == list(range(2, 11))
    incr_lock = FileLock(incr_lock)
    incr_lock.acquire()

    objs = [actor.incr.run_async() for _ in range(10)]
    assert 10 == actor.val.run()
    Path(val_err).touch()
    with pytest.raises(Exception):
        actor.val.run()
    Path(val_err).unlink()
    incr_lock.release()
    assert ray.get(objs) == list(range(11, 21))

    # test error cases
    actor.incr.run_async()  # 21
    actor.incr.run_async()  # 22
    actor.incr.run_async(create_incr_err=True)  # 23
    actor.incr.run_async()  # 24
    s5 = actor.incr.run_async()  # 25
    with pytest.raises(Exception):
        ray.get(s5)

    assert 23 == actor.val.run()
    Path(incr_err).unlink()
    obj = workflow.resume("sync_counter")
    assert 25 == ray.get(obj)[0]
    assert 25 == actor.val.run()
Example No. 12
class AquariumController:

    def __init__(self):
        logger.info("=" * 125)
        logger.info("Initializing".center(125))
        logger.info("=" * 125)
        self.load_data()
        self.load_config()
        self.hw_controller = Hardware()
        self.email = EmailAlerts()
        self.temp_c, self.temp_f = self.hw_controller.read_temperature("temp_tank")
        self.file_name = 'graph_data.csv'
        lock_path = self.file_name+".lock"
        self.lock = FileLock(lock_path)

        self.con = self.sql_connection()
        self.cursorObj = self.con.cursor()

        self.calibration_data = {
                "Co2 Calibration Data": {},
                "Fertilizer Calibration Data": {},
                "Water Conditioner Calibration Data": {},
            }
        self.ratio_data = {
            "Tank Size": {},
            "Co2 Ratio": {},
            "Fertilizer Ratio": {},
            "Water Conditioner Ratio": {},
        }
        self.setting_data = {
            "Network": {},
            "Temperature Alerts": {},
            "Email Alert": {}
        }

        self.cal_status = ["Success", "Failed", "In Progress", "None"]

        self.network_config = {
            "sender_email": {},
            "target_email": {},
            "password_email": {},
            "service_email": {},
            "alert_limit": {}
        }
        self.alert_counter = {}

    async def start_calibration(self, pump_type: str):
        try:
            '''asyncio.create_task(self.hw_controller.notification_led_pulse())
            self.hw_controller.button_state()
            asyncio.create_task(self.hw_controller.notification_led_flash())
            self.calibrate_pump(pump_type)
            self.hw_controller.notification_led_stop()
            '''
            await self.hw_controller.notification_led_pulse()
            self.hw_controller.button_state()
            await self.hw_controller.notification_led_flash()
            self.calibrate_pump(pump_type)
            self.hw_controller.notification_led_stop()
        except CalibrationCancelled:
            logger.info("!Calibration was Cancelled!")

    def calibrate_pump(self, pump_type):
        logger.info(f"Running {pump_type} Pump\n"
                    f"{pump_type}", "Calibration started".center(125))
        self.calibration_status(pump_type, self.cal_status[2])
        start = time.time()
        self.hw_controller.pump_on(pump_type)
        self.hw_controller.button_state()
        logger.info(f"Stopping {pump_type}\n"
                    f"{pump_type}", "Calibration finished.".center(125))
        self.calibration_status(pump_type, self.cal_status[0])
        end = time.time()
        self.hw_controller.pump_off(pump_type)
        cal_time = round(end - start, 2)
        per_ml = round(cal_time / 10, 2)
        logger.info(type(cal_time))
        logger.info(f"{pump_type} Runtime: {cal_time}")
        self.calibration_data[f"{pump_type} Calibration Data"].update(
            {
                "Time per 10mL": cal_time,
                "Time per 1mL": per_ml
            }
        )
        self.save()

    def stop_calibration(self, pump_type: str):
        self.hw_controller.stop_calibration()

    def cal_status(self, pump_type: str):
        self.hw_controller.calibration_status()

    def tank_temperature(self):
        temp_c, temp_f = self.hw_controller.read_temperature("temp_tank")
        try:
            ht = self.setting_data["Temperature Alerts"]["High Temp"]
            lt = self.setting_data["Temperature Alerts"]["Low Temp"]
            ht_checked = self.setting_data["Temperature Alerts"]["High Enabled"]
            lt_checked = self.setting_data["Temperature Alerts"]["Low Enabled"]
            if ht_checked == '2':
                if temp_c > float(ht):
                    logger.critical("HIGH TEMP ALERT!!!".center(125))
                    cur_temp = temp_c
                    high_temp_threshold = ht
                    self.email.high_temp_alert_example(cur_temp, high_temp_threshold)
            if lt_checked == '2':
                if temp_c < float(lt):
                    logger.critical("LOW TEMP ALERT!!!".center(125))
        except KeyError:
            logger.warning("No Temperature Alert Data")
        return round(temp_c, 2)

    def email_test(self):
        logger.info("=" * 125)
        self.email.msg_format(alert_type='EMAIL TEST', variable_data=None, custom_msg=self.email.templates.test_msg())

        logger.info("=" * 125)

    def email_ht_alert(self):
        logger.info("=" * 125)
        logger.info("HT Alert Email Function".center(125))
        logger.info("=" * 125)
        data = {
            "Current Temperature": self.temp_c,
            "Current Threshold": self.setting_data["Temperature Alerts"]["High Temp"]
        }
        send = self.email.email_builder(alert_type='High Temperature', alert_data=data)
        logger.info("=" * 125)
    """
    def alert_counters(self, alert_type):
        name = f"{alert_type}" + " counter"
        #dict = self.alert_counter[""]
        if name in self.alert_counter.keys():
            for value in name:
                self.alert_counter[(alert_type + " counter")].update(
                    {
                        f"{name}": int(value)+1,
                    }
                )
            self.save_config()
    """

    def calibration_status(self, pump_type, cal_status):
        logger.info(f"pump: {pump_type}, status: {cal_status}")
        return pump_type, cal_status

    def ratioequals(self, ratio_results):
        logger.info("ratio equals function")
        logger.info(f"values {ratio_results}")
        new_ratio = ('Tank', 'Co2_ratio', 'Co2_water', 'Fertilizer_ratio',
                     'Fertilizer_water', 'WaterConditioner_ratio', 'WaterConditioner_water')

        zipratio = zip(new_ratio, ratio_results)
        ratiodict = dict(zipratio)
        for value in ['Co2', 'Fertilizer', 'WaterConditioner']:
            logger.info(type(value))
            ratio = float(ratiodict[value + '_ratio'])
            water = float(ratiodict[value + '_water'])
            tank = float(ratiodict['Tank'])
            try:
                dosage = ratio * tank / water
            except ZeroDivisionError:
                dosage = 0
            ratiodict[value + '_dosage'] =  "{:.2f}".format(float(dosage))
            #if dosage != 0 else 0
            self.ratio_data = ratiodict
        logger.info(f"Dict Data: {ratiodict}")
        self.save()


    def ratios(self, ratio_results):
        logger.info(f"Ratio: {ratio_results}")
        logger.info('Tank Size: {} Litres,\n'
         'Co2 Concentrate: {} mL, Co2 to Water: {} Litres,\n'
         'Fertilizer Concentrate: {} mL, Fertilizer to Water: {} Litres,\n'
         'WaterConditioner Concentrate: {} mL, WaterConditioner to Water: {} Litres'.format(
            *ratio_results))
        self.ratioequals(ratio_results)

    def alert_data(self, ht: int, lt: int, ht_enabled, lt_enabled):
        logger.info("New Alert Set")
        logger.info(f"High Temperature: {ht} Enabled:{ht_enabled}")
        logger.info(f"Low Temperature: {lt} Enabled:{lt_enabled}")
        self.setting_data["Temperature Alerts"].update(
            {
                "High Temp": ht,
                "High Enabled": ht_enabled,
                "Low Temp": lt,
                "Low Enabled": lt_enabled
            }
        )
        self.save()

    def save(self):
        data = {
            "Setting Data": self.setting_data,
            "Ratio Data": self.ratio_data,
            "Calibration Data": self.calibration_data,
            # "Schedule Data": schedule_data,
            # "Dosage Data": dosage_data,
            # "Light Hour Data": light_hour_data
        }
        with open('data.txt', 'w') as json_file:
            json_file.write(json.dumps(data, indent=4))
        logger.info("Settings Updated")

    def save_config(self):
        logger.info(f"Before updating config: {self.network_config}")
        config_data = {
            "network_config": self.network_config,
            "alert_counters": self.alert_counter,
        }
        try:
            with open('config.json', 'w') as json_data_file:
                json_data_file.write(json.dumps(config_data, indent=4))
            logger.info(f"Email Details Saved")
        except:
            logger.exception(f" Email Details not Saved")
        logger.info(f"After updating config_data: {config_data}")
        logger.info(f"After updating config: {self.network_config}")

    def save_email(self, email_user: str, service_email: str, password_email):
        logger.info("=" * 125)
        logger.info(f"Email Address Updated".center(125))
        logger.info("=" * 125)
        if "@" not in email_user:
            logger.info(f"adding {service_email} to {email_user}")
            email_user = email_user.strip() + service_email.strip()
        else:
            logger.info(f"Email already has '@' ")
        self.network_config.update(
            {
                "sender_email": "*****@*****.**",
                "target_email": email_user.strip(),
                "password_email": password_email,
                "service_email": service_email.strip(),
                #"alert_limit": alert_limit,
            }
        )

        logger.info(f"{email_user}")
        logger.info(f"Email Pass: {password_email}")
        #logger.info(f"Alert Limit: {alert_limit} Per Day")
        self.save_config()
        logger.info("=" * 125)

    def saveEmail_limit(self, alert_limit: int):
        logger.info("=" * 125)
        logger.info(f"Email Alert Limit Updated".center(125))
        logger.info("=" * 125)
        self.network_config.update(
            {
                "alert_limit": alert_limit,
            }
        )
        logger.info(f"Alert Limit: {alert_limit} Per Day")
        self.save_config()
        logger.info("=" * 125)

    def load_data(self):
        logger.info("=" * 125)
        logger.info("Loading 'data.txt' From Local Path")
        try:
            if os.path.isfile('data.txt'):
                with open('data.txt', 'r') as json_file:
                    data = json.loads(json_file.read())
                    logger.success("'data.txt' Loaded")
                    logger.debug(f"'data.txt' contents: {data}")
                    logger.debug("Assigning Data Values from 'data.txt")
                    self.ratio_data = data["Ratio Data"]
                    self.calibration_data = data["Calibration Data"]
                    self.setting_data = data["Setting Data"]
                    #self.setting_data = data["Temperature Alerts"]
                    # conversion_values
                    # schedule_data
                    # light_hour_data
                    # dosage_data = data["Dosage Data"]
                    logger.success("Data Values Updated")
                    return data
        except (KeyError, ValueError, TypeError):
            logger.critical("Couldn't Load 'data.txt'")
            logger.exception("Traceback")
        logger.info("=" * 125)

    def load_config(self):
        try:
            logger.info("=" * 125)
            logger.info("Loading config_data")
            if os.path.isfile('config.json'):
                with open('config.json', 'r') as json_data_file:
                    config_data = json.loads(json_data_file.read())
                    logger.success("'config.json' Loaded")
                    logger.debug(f"'config.json' contents: {config_data}")
                    try:
                        logger.info("Assigning Config Values from 'config.json'")
                        self.network_config = config_data["network_config"]
                        self.alert_counter = config_data["alert_counters"]
                        logger.success("Config Values Updated")
                    except (KeyError, ValueError, TypeError):
                        logger.warning("Couldn't Assign Values from 'config.json")
        except json.JSONDecodeError.with_traceback():
            logger.critical("Couldn't Load 'config_data")
        logger.info("=" * 125)
        return config_data

    def get_csv(self):
        try:
            with self.lock.acquire(timeout=1):
                with open('graph_data.csv', 'r') as csv_file:
                    return csv_file.read()
        except Timeout:
            logger.warning("Another instance of this application currently holds the lock.")

    def get_db(self):
        db = self.cursorObj.execute("SELECT * FROM tank_temperature")
        return db

    def sql_connection(self):
        try:
            con = sqlite3.connect('AquaPiDB.db')
            logger.debug("Connection is established to Database")
            return con
        except Error:
            logger.exception(Error)

    def update(self):
        g = git.cmd.Git("/home/pi/QTAquarium/")
        msg = g.pull()
        logger.info(f"Repo Status: {msg}")
Example No. 13
def test_workflow_manager(workflow_start_regular, tmp_path):
    # For sync between jobs
    tmp_file = str(tmp_path / "lock")
    lock = FileLock(tmp_file)
    lock.acquire()

    # For sync between jobs
    flag_file = tmp_path / "flag"
    flag_file.touch()

    @ray.remote
    def long_running(i):
        lock = FileLock(tmp_file)
        with lock.acquire():
            pass

        if i % 2 == 0:
            if flag_file.exists():
                raise ValueError()
        return 100

    outputs = [
        workflow.run_async(long_running.bind(i), workflow_id=str(i))
        for i in range(100)
    ]
    # Test list all, it should list all jobs running
    all_tasks = workflow.list_all()
    assert len(all_tasks) == 100
    all_tasks_running = workflow.list_all(workflow.RUNNING)
    assert dict(all_tasks) == dict(all_tasks_running)
    assert workflow.get_status("0") == "RUNNING"

    # Release lock and make sure all tasks finished
    lock.release()
    for o in outputs:
        try:
            r = ray.get(o)
        except Exception:
            continue
        assert 100 == r
    all_tasks_running = workflow.list_all(workflow.WorkflowStatus.RUNNING)
    assert len(all_tasks_running) == 0
    # Half of them failed and half succeed
    failed_jobs = workflow.list_all("FAILED")
    assert len(failed_jobs) == 50
    finished_jobs = workflow.list_all("SUCCESSFUL")
    assert len(finished_jobs) == 50

    all_tasks_status = workflow.list_all({
        workflow.WorkflowStatus.SUCCESSFUL,
        workflow.WorkflowStatus.FAILED,
        workflow.WorkflowStatus.RUNNING,
    })
    assert len(all_tasks_status) == 100
    assert failed_jobs == [(k, v) for (k, v) in all_tasks_status
                           if v == workflow.WorkflowStatus.FAILED]
    assert finished_jobs == [(k, v) for (k, v) in all_tasks_status
                             if v == workflow.WorkflowStatus.SUCCESSFUL]

    # Test get_status
    assert workflow.get_status("0") == "FAILED"
    assert workflow.get_status("1") == "SUCCESSFUL"
    lock.acquire()
    r = workflow.resume_async("0")
    assert workflow.get_status("0") == workflow.RUNNING
    flag_file.unlink()
    lock.release()
    assert 100 == ray.get(r)
    assert workflow.get_status("0") == workflow.SUCCESSFUL

    # Test cancel
    lock.acquire()
    workflow.resume_async("2")
    assert workflow.get_status("2") == workflow.RUNNING
    workflow.cancel("2")
    assert workflow.get_status("2") == workflow.CANCELED

    # Now resume_all
    resumed = workflow.resume_all(include_failed=True)
    assert len(resumed) == 48
    lock.release()
    assert [ray.get(o) for (_, o) in resumed] == [100] * 48
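
Note: the test uses a FileLock held by the driver as a cross-process gate: every remote task blocks on the same lock file until the driver releases it. The gating trick in isolation, as a sketch (assuming ray.init() has already run; gate_tasks is an illustrative name):

import ray
from filelock import FileLock

def gate_tasks(lock_file, n):
    gate = FileLock(lock_file)
    gate.acquire()                 # driver holds the gate

    @ray.remote
    def task(i):
        with FileLock(lock_file):  # blocks until the driver releases the gate
            pass
        return i

    refs = [task.remote(i) for i in range(n)]
    gate.release()                 # let all queued tasks proceed
    return ray.get(refs)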
Example No. 14
class SimpleCoinbaseBot:
    def getconf(self, section, key, cast, default):
        try:
            val = self.config[section].get(key)
        except:
            return default
        if cast == bool:
            val = str2bool(val)
        else:
            val = cast(val)
        return val

    def __init__(self, config):
        self.cache = {}
        self.config = config
        for section, v in CONF_DEFAULTS.items():
            for key, cast, default in v:
                val = self.getconf(section, key, cast, default)
                if section == 'auth':
                    continue
                print('SimpleCoinbaseBot config: [{}][{}] -> {}'.format(section, key, val))
                setattr(self, key, val)

        #self.log_file = self.getconf('general', 'log_file', str, 'simplebot.log')
        #self.cache_file = self.getconf('general', 'cache_file', str, 'simplebot.cache')
        if not self.cache_file.endswith('.cache'):
            raise Exception('ERROR: Cache filenames must end in .cache')
        self.lock_file = self.cache_file.replace('.cache', '.lock')
        self.lock = FileLock(self.lock_file, timeout=1)
        try:
            self.lock.acquire()
        except:
            print('ERROR: Failed to acquire lock: {}'.format(self.lock_file))
            print('Is another process already running with this config?')
            exit(1)
        self.buy_percent_of_wallet = round(self.buy_wallet_percent/100, 4)
        self.mail_to = self.mail_to.split(',')
        self.client = self.authenticate()
        self.wallet = None
        self.current_price = None
        self.fee = None
        self.open_sells = []
        self.product_info = None
        self.min_size = None
        self.max_size = None
        self.size_decimal_places = None
        self.usd_decimal_places = None
        self.can_buy = False
        self.current_price_target = None
        self.current_price_increase = None
        self.last_buy = None
        # Run all and validate it worked on init
        self.get_all()
        self.__assert()
        self._open_cache()
        self.logit('SimpleCoinbaseBot started: {} size-precision:{} usd-precision:{} current-fee:{} min-size:{} max-size:{}'.format(
            self.coin, self.size_decimal_places, self.usd_decimal_places, self.fee, self.min_size, self.max_size
        ))
        self.logit('SimpleCoinbaseBot started: {} sleep_seconds:{} sell_at_percent:{} max_sells_outstanding:{} max_buys_per_hour:{}'.format(
            self.coin, self.sleep_seconds, self.sell_at_percent, self.max_sells_outstanding, self.max_buys_per_hour
        ))
        #self.stoploss_enable = True
        #self.stoploss_percent = Decimal('-2.0')
        #self.stoploss_seconds = 86400

    def _open_cache(self):
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                self.cache = pickle.load(f)

    def _write_cache(self):
        with open(self.cache_file+'-tmp', "wb") as f:
            pickle.dump(self.cache, f)
            os.fsync(f)
        if os.path.exists(self.cache_file):
            os.rename(self.cache_file, self.cache_file+'-prev')
        os.rename(self.cache_file+'-tmp', self.cache_file)

    def _log(self, path, msg):
        now = datetime.now()
        print('{} {}'.format(now, str(msg).strip()))
        with open(path, 'a') as f:
            f.write('{} {}\n'.format(now, str(msg).strip()))

    def logdebug(self, msg):
        if self.debug_log_response:
            self._log(self.debug_log_response_file, msg)

    def logit(self, msg):
        if not self.coin in msg:
            msg = '{} {}'.format(self.coin, msg)
        self._log(self.log_file, msg)

    def authenticate(self):
        key = self.config['auth'].get('key')
        passphrase = self.config['auth'].get('passphrase')
        b64secret = self.config['auth'].get('b64secret')
        auth_client = cbpro.AuthenticatedClient(key, b64secret, passphrase)
        return auth_client

    def get_current_price(self):
        ticker = self.client.get_product_ticker(product_id=self.coin)
        self.logdebug(ticker)
        current_price = ticker['price']
        return Decimal(current_price)

    def get_product_info(self):
        self.product_info = None
        products = self.client.get_products()
        for p in products:
            if p['id'] == self.coin:
                self.product_info = p
                break
        assert(self.product_info != None)
        self.min_size = Decimal(self.product_info['base_min_size'])
        self.max_size = Decimal(self.product_info['base_max_size'])
        # counting the zeros will give the number of decimals to round to
        self.size_decimal_places = self.product_info['base_increment'].split('1')[0].count('0')
        self.usd_decimal_places = self.product_info['quote_increment'].split('1')[0].count('0')

    def get_usd_wallet(self):
        wallet = None
        accounts = self.client.get_accounts()
        for account in accounts:
            if account['currency'] == 'USD':
                wallet = account['available']
                self.logdebug(account)
                break
        return Decimal(wallet)

    def get_open_sells(self):
        orders = self.client.get_orders()
        self.logdebug(orders)
        open_sells = []
        for order in orders:
            o = order
            if order['side'] == 'sell' and order['product_id'] == self.coin:
                order['price'] = Decimal(order['price'])
                order['size'] = Decimal(order['size'])
                open_sells.append(order)
        return open_sells

    def get_fee(self):
        """ current pip cbpro version doesn't have my get_fees() patch, so manually query it """
        #{'taker_fee_rate': '0.0035', 'maker_fee_rate': '0.0035', 'usd_volume': '21953.58'}
        fees = self.client._send_message('get', '/fees')
        assert('taker_fee_rate' in fees)
        self.logdebug(fees)
        if Decimal(fees['taker_fee_rate']) > Decimal(fees['maker_fee_rate']):
            return Decimal(fees['taker_fee_rate'])
        return Decimal(fees['maker_fee_rate'])

    def _rand_msleep(self):
        time.sleep(uniform(0.1, 0.75))

    def get_all(self):
        self._rand_msleep()
        self.wallet = self.get_usd_wallet()
        self._rand_msleep()
        self.get_product_info()
        self._rand_msleep()
        self.current_price = self.get_current_price()
        #self.open_sells = self.get_open_sells()
        self.fee = self.get_fee()
        self._rand_msleep()
        self.get_current_price_target()
        self._rand_msleep()
        self.can_buy = self.check_if_can_buy()

    def sendemail(self, subject, msg=None):
        """ TODO: Add auth, currently setup to relay locally or relay-by-IP """
        for email in self.mail_to:
            if not email.strip():
                continue
            headers = "From: %s\r\nTo: %s\r\nSubject: %s %s\r\n\r\n" % (
                self.mail_from, email, self.coin, subject)
            if not msg:
                msg2 = subject
            else:
                msg2 = msg
            msg2 = headers + msg2
            server = smtplib.SMTP(self.mail_host)
            server.sendmail(self.mail_from, email, msg2)
            server.quit()
            time.sleep(0.1)

    def __assert(self):
        assert(self.wallet != None)
        assert(self.current_price != None)

    def maybe_buy_sell(self):
        self.__assert()
        if not self.can_buy:
            return

        # Check if USD wallet has enough available
        if self.wallet < Decimal(self.product_info['min_market_funds']):
            self.logit('WARNING: Wallet value too small (<${}): {}'.format(
                self.product_info['min_market_funds'], self.wallet))
            return

        # Calculate and check if size is large enough (sometimes it's not if available wallet is too small)
        buy_amount = round(Decimal(self.buy_percent_of_wallet) * Decimal(self.wallet), self.usd_decimal_places)
        buy_size = round(Decimal(buy_amount)/self.current_price, self.size_decimal_places)
        if buy_size <= self.min_size:
            self.logit('WARNING: Buy size is too small {} < {} wallet:{}.'.format(
                buy_size, self.min_size, self.wallet))
            return

        # Check if USD wallet has enough available
        if buy_amount < Decimal(self.product_info['min_market_funds']):
            self.logit('WARNING: Buy amount too small (<${}): {}'.format(
                self.product_info['min_market_funds'], buy_amount))
            return

        # Make sure buy_amount is within buy_wallet_min/max
        if buy_amount < self.buy_wallet_min:
            self.logit('WARNING: buy_wallet_min hit. Setting to min.')
            buy_amount = self.buy_wallet_min
        elif buy_amount > self.buy_wallet_max:
            self.logit('WARNING: buy_wallet_max hit. Setting to max.')
            buy_amount = self.buy_wallet_max

        # adjust size to fit with fee
        buy_size = round(Decimal(buy_size) - Decimal(buy_size)*Decimal(self.fee), self.size_decimal_places)
        self.logit('BUY: price:{} amount:{} size:{}'.format(
            self.current_price, buy_amount, buy_size))
        rc = self.client.place_market_order(
            product_id=self.coin,
            side='buy',
            funds=str(buy_amount)
        )
        self.logdebug(rc)
        self.logit('BUY-RESPONSE: {}'.format(rc))
        order_id = rc['id']
        errors = 0
        self.last_buy = None
        # Wait until order is completely filled
        if order_id in self.cache:
            self.logit('ERROR: order_id exists in cache. ????: {}'.format(order_id))
        self.cache[order_id] = {
            'first_status':rc, 'last_status':None, 'time':time.time(),
            'sell_order':None, 'sell_order_completed':None, 'completed':False, 'profit_usd':None
        }
        self._write_cache()
        done = False
        error = False
        status_errors = 0
        time.sleep(5)
        while 1:
            try:
                buy = self.client.get_order(order_id)
                self.cache[order_id]['last_status'] = buy
                self._write_cache()
                self.logdebug(buy)
                if 'settled' in buy:
                    if buy['settled']:
                        self.logit('FILLED: size:{} funds:{}'.format(buy['filled_size'], buy['funds']))
                        self.last_buy = buy
                        done = True
                        break
                else:
                    if 'message' in buy:
                        self.logit('WARNING: Failed to get order status: {}'.format(buy['message']))
                        self.logit('WARNING: Order status failure may be temporary, due to coinbase issues or exchange delays. Check: https://status.pro.coinbase.com')
                        status_errors += 1
                    else:
                        self.logit('WARNING: Failed to get order status: {}'.format(order_id))
                        status_errors += 1
                    time.sleep(10)
                if status_errors > 10:
                    errors += 1
            except Exception as err:
                self.logit('WARNING: get_order() failed: {}'.format(err))
                errors += 1
                time.sleep(8)
            if errors > 5:
                self.logit('WARNING: Failed to get order. Manual intervention needed.: {}'.format(
                    order_id))
                break
            time.sleep(2)

        # Buy order done, now place sell
        if done:
            rc = self.client.place_limit_order(
                product_id=self.coin,
                side='sell',
                price=str(self.current_price_target),
                size=str(round(Decimal(self.last_buy['filled_size']), self.size_decimal_places)),
            )
            self.logdebug(rc)
            self.logit('SELL-RESPONSE: {}'.format(rc))
            msg = 'BUY-FILLED: size:{} funds:{}\n'.format(buy['filled_size'], buy['funds'])
            msg = '{} SELL-PLACED: size:{} price:{}'.format(
                msg, self.last_buy['filled_size'], self.current_price_target)
            for m in msg.split('\n'):
                self.logit(m.strip())
            if not self.notify_only_completed:
                self.sendemail('BUY/SELL', msg=msg)
            self.cache[order_id]['sell_order'] = rc
            self._write_cache()
            self.last_buy = None
        else:
            # buy was placed but could not get order status
            if 'message' in buy:
                msg = 'BUY-PLACED-NOSTATUS: {}\n'.format(buy['message'])
            else:
                msg = 'BUY-PLACED-NOSTATUS: size:{} funds:{}\n'.format(
                    buy['filled_size'], buy['funds'])
            self.logit(msg)
            self.sendemail('BUY-ERROR', msg=msg)
        return buy

    def run_stoploss(self, buy_order_id):
        """ Cancel sell order, place new market sell to fill immediately
            get response and update cache
        """
        v = self.cache[buy_order_id]
        sell = v['sell_order']
        # cancel
        rc = self.client.cancel_order(sell['id'])
        self.logdebug(rc)
        self.logit('STOPLOSS: CANCEL-RESPONSE: {}'.format(rc))
        time.sleep(5)
        # new order
        rc = self.client.place_market_order(
            product_id=self.coin,
            side='sell',
            size=sell['size']
        )
        self.logdebug(rc)
        self.cache[buy_order_id]['sell_order'] = rc
        self._write_cache()
        self.logit('STOPLOSS: SELL-RESPONSE: {}'.format(rc))
        order_id = rc['id']
        time.sleep(5)
        done = False
        errors = 0
        status_errors = 0
        while 1:
            try:
                status = self.client.get_order(order_id)
                self.logdebug(status)
                self.cache[buy_order_id]['sell_order'] = status
                self._write_cache()
                if 'settled' in status:
                    if status['settled']:
                        self.logit('SELL-FILLED: {}'.format(status))
                        self.cache[buy_order_id]['sell_order_completed'] = status
                        self._write_cache()
                        done = True
                        break
                else:
                    if 'message' in status:
                        self.logit('WARNING: Failed to get order status: {}'.format(status['message']))
                        self.logit('WARNING: Order status failure may be temporary, due to coinbase issues or exchange delays. Check: https://status.pro.coinbase.com')
                        status_errors += 1
                    else:
                        self.logit('WARNING: Failed to get order status: {}'.format(order_id))
                        status_errors += 1
                    time.sleep(10)
                if status_errors > 10:
                    errors += 1
            except Exception as err:
                self.logit('WARNING: get_order() failed: {}'.format(err))
                errors += 1
                time.sleep(8)
            if errors > 5:
                self.logit('WARNING: Failed to get order. Manual intervention needed.: {}'.format(
                    order_id))
                break
            time.sleep(2)

        if not done:
            self.logit('ERROR: Failed to get_order() for stoploss sell. Handling this case is still a TODO.')

    def check_sell_orders(self):
        """ Check if any sell orders have completed """
        for buy_order_id, v in self.cache.items():
            if self.cache[buy_order_id]['completed']:
                continue
            if not v['sell_order']:
                self.logit('WARNING: No sell_order for buy {}. This should not happen.'.format(
                    buy_order_id))
                if time.time() - v['time'] > 60*60:
                    self.logit('WARNING: Still no sell_order for this buy.')
                    self.logit('WARNING: Writing as done/error since it has been > 60 minutes.')
                    self.cache[buy_order_id]['completed'] = True
                    self._write_cache()
                continue
            if 'message' in v['sell_order']:
                self.logit('WARNING: Corrupted sell order, marking as done: {}'.format(v['sell_order']))
                self.cache[buy_order_id]['completed'] = True
                self.cache[buy_order_id]['sell_order'] = None
                self._write_cache()
                continue
            sell = self.client.get_order(v['sell_order']['id'])
            if 'message' in sell:
                self.logit('WARNING: Failed to get sell order status (retrying later): {}'.format(
                    sell['message']))
                if time.time() - v['time'] > 60*60:
                    self.logit('WARNING: Failed to get order status.')
                    self.logit('WARNING: Writing as done/error since it has been > 60 minutes.')
                    self.cache[buy_order_id]['completed'] = True
                    self._write_cache()
                continue

            if 'status' in sell and sell['status'] != 'open':
                # calculate profit from buy to sell
                # done, remove buy/sell
                self.cache[buy_order_id]['completed'] = True
                self.cache[buy_order_id]['sell_order_completed'] = sell
                if sell['status'] == 'done':
                    sell_filled_size = Decimal(sell['filled_size'])
                    sell_value = Decimal(sell['executed_value'])
                    buy_filled_size = Decimal(v['last_status']['filled_size'])
                    buy_value = Decimal(v['last_status']['executed_value'])
                    #buy_sell_diff = round((sell_price*sell_filled_size) - (buy_price*buy_filled_size), 2)
                    buy_sell_diff = round(sell_value - buy_value, 2)
                    done_at = time.mktime(time.strptime(sell['done_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
                    self.cache[buy_order_id]['profit_usd'] = buy_sell_diff
                    msg = 'SELL-COMPLETED: ~duration:{:.2f} bought_val:{} sold_val:{} profit_usd:{}'.format(
                        time.time() - done_at,
                        round(buy_value, 2),
                        round(sell_value, 2),
                        buy_sell_diff
                    )
                    self.logit(msg)
                    self.sendemail('SELL-COMPLETED', msg=msg)
                else:
                    self.logit('SELL-COMPLETED-WITH-OTHER-STATUS: {}'.format(sell['status']))
                self._write_cache()
            else:
                # check for stoploss if enabled
                if self.stoploss_enable:
                    created_at = time.mktime(time.strptime(sell['created_at'].split('.')[0], '%Y-%m-%dT%H:%M:%S'))
                    duration = time.time() - created_at
                    bought_price = round(Decimal(v['last_status']['executed_value']) / Decimal(v['last_status']['filled_size']), 4)
                    p = 100*(self.current_price/bought_price) - Decimal('100.0')
                    stop_seconds = False
                    stop_percent = False
                    if duration >= self.stoploss_seconds:
                        stop_seconds = True
                    if p <= self.stoploss_percent:
                        stop_percent = True
                    if (stop_seconds or stop_percent) and self.stoploss_strategy == 'report':
                        self.logit('STOPLOSS: percent:{} duration:{}'.format(p, duration))

                    if self.stoploss_strategy == 'both' and stop_percent and stop_seconds:
                        self.logit('STOPLOSS: running stoploss strategy: {} percent:{} duration:{}'.format(
                            self.stoploss_strategy,
                            p, duration
                        ))
                        self.run_stoploss(buy_order_id)
                    elif self.stoploss_strategy == 'either' and (stop_percent or stop_seconds):
                        self.logit('STOPLOSS: running stoploss strategy: {} percent:{} duration:{}'.format(
                            self.stoploss_strategy,
                            p, duration
                        ))
                        self.run_stoploss(buy_order_id)

            time.sleep(0.75)

    def get_current_price_target(self):
        current_percent_increase = (self.fee*2)+(self.sell_at_percent/100)
        self.current_price_target = round(
            self.current_price * current_percent_increase + self.current_price,
            self.usd_decimal_places
        )
        self.current_price_increase = self.current_price * current_percent_increase
        return self.current_price_target

    @property
    def total_open_orders(self):
        total = 0
        for buy_order_id, v in self.cache.items():
            if not v['completed']:
                total += 1
        return total

    @property
    def total_sells_in_past_hour(self):
        current_time = time.time()
        last_hour_time = current_time - (60*60)
        total = 0
        for buy_order_id, v in self.cache.items():
            if v['time'] >= last_hour_time:
                total += 1
        return total

    def check_if_can_buy(self):
        """ Check orders if a sell price is <= current_price_target
            If so, this means no buy is allowed until that order is filled or out of range.
            Only allow within the fee range though to keep buy/sells further apart.
        """
        self.get_current_price_target()

        # Check how many buys were placed in past hour and total open
        if self.total_sells_in_past_hour > self.max_buys_per_hour:
            self.logit('WARNING: max_buys_per_hour({}) hit'.format(self.max_buys_per_hour))
            return

        # Don't count other orders now, only ones being tracked here
        #if len(self.open_sells) >= self.max_sells_outstanding:
        if self.total_open_orders >= self.max_sells_outstanding:
            self.logit('WARNING: max_sells_outstanding hit ({} of {})'.format(
                self.total_open_orders, self.max_sells_outstanding))
            return False
        can = True
        for buy_order_id, v in self.cache.items(): #self.open_sells:
            if v['completed']:
                continue
            sell_order = v['sell_order']
            if not sell_order:
                continue
            if 'price' not in sell_order:
                pass
                #self.logit('WARNING: Corrupted sell order. Writing as completed (error): {}'.format(sell_order))
                #self.cache[buy_order_id]['sell_order'] = None
                #self.cache[buy_order_id]['completed'] = True
                #self._write_cache()
            else:
                sell_price = Decimal(sell_order['price'])
                adjusted_sell_price = round(sell_price - (self.fee*2*sell_price), self.usd_decimal_places)
                if adjusted_sell_price <= self.current_price_target:
                    can = False
        return can

    def run(self):
        # Throttle startups randomly
        time.sleep(uniform(1, 5))
        while 1:
            self.get_all()
            self.logit('STATUS: price:{} fee:{} wallet:{} open-sells:{} price-target:{} can-buy:{}'.format(
                self.current_price, self.fee, self.wallet, self.total_open_orders, self.current_price_target,
                self.can_buy,
            ))
            self.maybe_buy_sell()
            self.check_sell_orders()
            time.sleep(self.sleep_seconds)
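
The price-target arithmetic in get_current_price_target() above is easy to check by hand; the following standalone sketch reproduces it with assumed fee and percentage values (all numbers are illustrative, not taken from any real configuration).

from decimal import Decimal

# Standalone sketch of the price-target formula from get_current_price_target().
# fee, sell_at_percent and current_price below are assumed, illustrative values.
fee = Decimal('0.005')              # assumed 0.5% taker fee
sell_at_percent = Decimal('1.0')    # assumed desired gain of 1%
current_price = Decimal('100.00')
usd_decimal_places = 2

current_percent_increase = (fee * 2) + (sell_at_percent / 100)   # 0.010 + 0.010 = 0.020
current_price_target = round(current_price * current_percent_increase + current_price,
                             usd_decimal_places)
print(current_price_target)  # 102.00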
Esempio n. 15
0
class ObdIO(object):
    """Create a obd server"""
    def __init__(self, port):
        self.port = port
        self.ser = None
        self.__lock = FileLock(gettempdir() + "/yacm-" +
                               self.port.replace("/", "") + ".lock",
                               timeout=1)
        self.__lock.acquire()

    def __enter__(self):
        self.ser = serial.Serial(self.port,
                                 parity=serial.PARITY_NONE,
                                 stopbits=1,
                                 bytesize=8)
        self.ser.baudrate = 500000
        # self.__write("at", "ws")  # Reset device
        self.__write("at", "d0")  # Set defaults
        self.__write("at", "l0")  # Disable line feed
        self.__write("at", "e0")  # Disable echo
        self.__write("at", "h0")  # Disable headers
        self.__write("at", "sp0")  # Auto set protocol
        return self

    def __exit__(self, exception_type, exception_value, exception_traceback):
        self.__write("at", "ws")  # Reset device
        self.ser.close()
        self.__lock.release()
        os.remove(self.__lock.lock_file)

    def query(self, mode: str, code: str) -> Union[str, Tuple]:
        """Query obd requests"""
        self.__write(mode, code)
        return self.__read()

    def __write(self, mode: str, code: str) -> None:
        self.ser.flushInput()
        self.ser.write(f"{mode}{code}\r".encode())
        logging.info(f"Mode: {mode} Code: {code}")
        self.ser.flush()
        if mode == "at":
            if code == "ws":
                self.ser.readline()
            else:
                self.ser.read_until(b'>')  # Discard the "OK" message

    def __read(self) -> Union[str, Tuple]:
        raw_data = self.ser.read_until(b'\r>')
        while not raw_data:
            raw_data = self.ser.read_until(b'\r>')
        logging.info(f"raw_data: {raw_data}")
        if raw_data not in {b'\r?\r>', b'?\r\r'}:
            if raw_data[0] == 13 and raw_data[-3] != 13:  # Emulator
                raw_data = raw_data[1:-2]
            if raw_data[0] != 13 and raw_data[-3] == 13:  # Car
                raw_data = raw_data[:-3]
            if raw_data == b"NO DATA":
                result = "NO DATA"
            else:
                result = raw_data.decode("ascii").lower().split(' ')[2:]
        else:
            result = "?"
        logging.info(f"result: {result}\n")
        return result

    def supported_pids(self) -> List[str]:
        """Return supported pids"""
        hex2bin_map = {
            "0": "0000",
            "1": "0001",
            "2": "0010",
            "3": "0011",
            "4": "0100",
            "5": "0101",
            "6": "0110",
            "7": "0111",
            "8": "1000",
            "9": "1001",
            "a": "1010",
            "b": "1011",
            "c": "1100",
            "d": "1101",
            "e": "1110",
            "f": "1111",
        }
        supported_pids: List[str] = []
        for pid in ["00", "20", "40", "60", "80"]:
            pids = ''.join(self.query("01", pid))
            if pids not in {"?", "NO DATA"}:
                binary_pids = ''.join(hex2bin_map[nibble] for nibble in pids)
                pid_code = int(pid, 16)  # PID pages are hex offsets (0x00, 0x20, ...)
                for bit in binary_pids:
                    pid_code += 1
                    if bit == "1":
                        supported_pids.append(hex(pid_code)[2:])
        return supported_pids
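
A hypothetical usage sketch for the ObdIO class above. The serial device path and the queried PID are assumptions; both depend on the adapter and vehicle.

# Hypothetical usage of ObdIO; "/dev/ttyUSB0" and PID 0x0c are assumptions.
if __name__ == "__main__":
    with ObdIO("/dev/ttyUSB0") as obd:
        print(obd.supported_pids())    # list of supported mode-01 PIDs
        print(obd.query("01", "0c"))   # raw reply for engine RPM (PID 0x0C)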
Esempio n. 16
0
class CandleConnector():
    def __init__(self):
        self.lock = FileLock("config.csv.lock")
        # make dict here that stores the amount for each coin
        self.config = "config.csv"
        self.candles = candles.BinaceConnector()
        self.masterTicker = -60

    def readConfig(self):
        self.lock.acquire()
        df = pd.read_csv(self.config,encoding='utf8', delimiter=',' , 
            names= ['coin', 'capital', 'starting', 'limit', 'currentPrice', 'autobought', 'takeprofit', 'updatetime', 'orderid', 'takeProfitAmount', 'takeProfitOrder', 'delta'])
        self.lock.release()
        df.set_index('coin', inplace=True)
        return df

    #get the current config
    def getCoinConfigData(self, coin):
        df = self.readConfig()
        return df.loc[coin]

    #save a new copy of the config
    def setCoinConfigData(self, df):
        self.lock.acquire()
        df.to_csv(self.config, mode='w', header=False, index=True)
        self.lock.release()

    def getAutoBoughtAmount(self, coin):
        return float(self.getCoinConfigData(coin)['autobought'])

    # helper for buying a number of coins at current price
    def orderNumber(self, coin, number):
        return (self.candles.buyMarket(coin, number))

    # gives you a quote for a coin
    def getQuote(self, coin):
        return float(self.candles.getCoinPrice(coin))

    # write out to a log file
    def logit(self, message, destination):
        with open(f"testData/{destination}.txt", "a") as f:
            f.write(message)
            f.write("\n")

    def saveCoinBuyData(self, coin, price, amount, setcap=0.0, setupdatetime=180, order="none"):
        df = self.readConfig()
        if setcap > 0:
            df.at[coin, 'capital'] = setcap
        df.at[coin, 'starting'] = price
        df.at[coin, 'autobought'] = amount
        df.at[coin, 'limit'] = price * df.at[coin, 'takeprofit']
        df.at[coin, 'updatetime'] = setupdatetime
        df.at[coin, 'orderid'] = order
        self.setCoinConfigData(df)


    def saveCoinLimitData(self, coin, price, limit, setupdatetime=180):
        df = self.readConfig()
        df.at[coin, 'currentPrice'] = price
        df.at[coin, 'limit'] = limit
        self.setCoinConfigData(df)

    def updateDelta(self, coin, delta, price):
        df = self.readConfig()
        df.at[coin, 'currentPrice'] = price
        df.at[coin, 'delta'] = delta - 180
        self.setCoinConfigData(df)

    def updateOrder(self, coin, order):
        df = self.readConfig()
        df.at[coin, 'orderid'] = order
        self.setCoinConfigData(df)


    #sell an amount at current price
    def sellNow(self, coin):
        #get the amount the bot bought
        amount = self.getAutoBoughtAmount(coin)
        print(f"found {amount}")
        if amount > 0:
            print(f"selling")
            sellorder = self.candles.sellMarket(coin, amount)
            time.sleep(1)
            # save the data for analysis later and reset the bot coin's config
            self.logit(f"SELLING DUE TO TAKEPROFIT {sellorder}", "logger")
            #need to check to make sure we did sell before we save this
            sellprice = float(sellorder['fills'][0]['price']) * amount
            print(sellprice)
            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)

    def echoCurrentTick(self):
        with open('tick', 'w') as f:
            f.write(f"{self.masterTicker}")

    def runForever(self):
        while 1:
            self.masterTicker += 60
            df = self.readConfig()
            # loop over the contents of our config file
            tickers = self.candles.getBook()
            for coin, row in df.iterrows():
                # check to see if the bot has made a purchase
                position = float(row['autobought'])
                if position > 0:
                    for x in tickers:
                        if coin == x['symbol']:
                            currentPrice = float(x['bidPrice'])
                    # if the bot has bought, check the update time
                    currentOrder = row['orderid']
                    updatetime = int(row['updatetime'])
                    delta = self.masterTicker % updatetime
                    if delta == 0:
                        #get the current price and check if it's above our current limit
                        currentLimit = float(row['limit'])
                        if currentPrice < currentLimit:
                            print("checking order")
                            if "none" not in currentOrder:
                                print("cancelOrder")
                                status = self.candles.checkStatus(coin, currentOrder)
                                if status != 'FILLED':
                                    self.candles.cancelOrder(coin, currentOrder)
                                    time.sleep(.3)
                                    print(f"selling because price {currentPrice} < limit {currentLimit}")
                                    self.sellNow(coin)
                            else:
                                self.sellNow(coin)
                        else:
                            # calculate a new limit based on our coin's config profile
                            newlimit = currentPrice*float(row['takeprofit'])
                            print(f"new limit {newlimit}")
                            if newlimit > currentLimit:
                                print(f"new limit > {currentLimit}")
                                self.saveCoinLimitData(coin, currentPrice, newlimit)

                    starting = float(row['starting'])
                    # check to see if a stop loss order has been placed
                    if "none" not in currentOrder:
                        print(f"order {currentOrder} is", end = " ")
                        status = self.candles.checkStatus(coin, currentOrder)
                        if status == 'FILLED':
                            print("FILLED so close")
                            sellprice = float(status['fills'][0]['price']) * row['autobought']
                            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)
                        else:
                            print("open")
                    # if a stop loss has not been placed and we are in profit, attempt to at least cover our fees
                    elif currentPrice > starting + (starting * 0.005):
                        print("made our money back placing limit order")
                        #save this order to the config
                        order = self.candles.stopLoss(coin,
                            stop=(starting + (starting * (2 * .0008))),
                            limit=(starting + (starting * (2 * .00076))),
                            position=position)['clientOrderId']
                        self.updateOrder(coin, order)

                    self.updateDelta(coin, delta, currentPrice)
                    self.logit(f"{self.masterTicker}, {row.starting}, {currentPrice}, {row.limit}", coin)
            self.echoCurrentTick()
            time.sleep(60)
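
A hypothetical driver for the CandleConnector class above; the symbol is an assumption and config.csv must already exist with the column layout expected by readConfig().

# Hypothetical driver for CandleConnector; the symbol and existing config.csv are assumptions.
if __name__ == "__main__":
    connector = CandleConnector()
    print(connector.getQuote("BTCUSDT"))   # assumed Binance symbol
    connector.runForever()                  # blocks, ticking once per minute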
Esempio n. 17
0
class Metric(object):
    def __init__(
        self,
        name: str = None,
        experiment_id: Optional[str] = None,
        process_id: int = 0,
        num_process: int = 1,
        data_dir: Optional[str] = None,
        in_memory: bool = False,
        **kwargs,
    ):
        """ A Metrics is the base class and common API for all metrics.
            Args:
                process_id (int): specify the id of the node in a distributed settings between 0 and num_nodes-1
                    This can be used, to compute metrics on distributed setups
                    (in particular non-additive metrics like F1).
                data_dir (str): path to a directory in which temporary data will be stored.
                    This should be a shared file-system for distributed setups.
                experiment_id (str): Should be used if you perform several concurrent experiments using
                    the same caching directory (will be indicated in the raise error)
                in_memory (bool): keep all predictions and references in memory. Not possible in distributed settings.
        """
        # Safety checks
        assert isinstance(
            process_id, int
        ) and process_id >= 0, "'process_id' should be a non-negative integer"
        assert (isinstance(num_process, int) and num_process > process_id
                ), "'num_process' should be a number greater than process_id"
        assert (
            process_id == 0 or not in_memory
        ), "Using 'in_memory' is not possible in distributed setting (process_id > 0)."

        # Metric name
        self.name = camelcase_to_snakecase(self.__class__.__name__)
        # Configuration name
        self.config_name: str = name or "default"

        self.process_id = process_id
        self.num_process = num_process
        self.in_memory = in_memory
        self.experiment_id = experiment_id if experiment_id is not None else "cache"
        self._version = "1.0.0"
        self._data_dir_root = os.path.expanduser(data_dir or HF_METRICS_CACHE)
        self.data_dir = self._build_data_dir()

        # prepare info
        info = self._info()
        info.metric_name = self.name
        info.config_name = self.config_name
        info.version = self._version
        self.info = info

        # Update 'compute' and 'add' docstring
        self.compute.__func__.__doc__ += self.info.inputs_description
        self.add_batch.__func__.__doc__ += self.info.inputs_description
        self.add.__func__.__doc__ += self.info.inputs_description

        self.arrow_schema = pa.schema(field
                                      for field in self.info.features.type)
        self.buf_writer = None
        self.writer = None
        self.writer_batch_size = None
        self.data = None

        # Check we can write on the cache file without competitors
        self.cache_file_name = self._get_cache_path(self.process_id)
        self.filelock = FileLock(self.cache_file_name + ".lock")
        try:
            self.filelock.acquire(timeout=1)
        except Timeout:
            raise ValueError(
                "Cannot acquire lock, caching file might be used by another process, "
                "you should setup a unique 'experiment_id' for this run.")

    def _relative_data_dir(self, with_version=True):
        """Relative path of this dataset in data_dir."""
        builder_data_dir = os.path.join(self.name, self.config_name)
        if not with_version:
            return builder_data_dir

        version = self._version
        version_data_dir = os.path.join(builder_data_dir, str(version))
        return version_data_dir

    def _build_data_dir(self):
        """ Return the directory for the current version.
        """
        builder_data_dir = os.path.join(
            self._data_dir_root, self._relative_data_dir(with_version=False))
        version_data_dir = os.path.join(
            self._data_dir_root, self._relative_data_dir(with_version=True))

        def _other_versions_on_disk():
            """Returns previous versions on disk."""
            if not os.path.exists(builder_data_dir):
                return []

            version_dirnames = []
            for dir_name in os.listdir(builder_data_dir):
                try:
                    version_dirnames.append((Version(dir_name), dir_name))
                except ValueError:  # Invalid version (ex: incomplete data dir)
                    pass
            version_dirnames.sort(reverse=True)
            return version_dirnames

        # Check and warn if other versions exist on disk
        version_dirs = _other_versions_on_disk()
        if version_dirs:
            other_version = version_dirs[0][0]
            if other_version != self._version:
                warn_msg = (
                    "Found a different version {other_version} of metric {name} in "
                    "data_dir {data_dir}. Using currently defined version "
                    "{cur_version}.".format(
                        other_version=str(other_version),
                        name=self.name,
                        data_dir=self._data_dir_root,
                        cur_version=str(self._version),
                    ))
                logger.warning(warn_msg)

        os.makedirs(version_data_dir, exist_ok=True)
        return version_data_dir

    def _get_cache_path(self, node_id):
        return os.path.join(
            self.data_dir, f"{self.experiment_id}-{self.name}-{node_id}.arrow")

    def finalize(self, timeout=120):
        """ Close all the writing process and load/gather the data
            from all the nodes if main node or all_process is True.
        """
        self.writer.finalize()
        self.writer = None
        self.buf_writer = None
        self.filelock.release()

        if self.process_id == 0:
            # Let's acquire a lock on each node files to be sure they are finished writing
            node_files = []
            locks = []
            for node_id in range(self.num_process):
                node_file = self._get_cache_path(node_id)
                filelock = FileLock(node_file + ".lock")
                filelock.acquire(timeout=timeout)
                node_files.append({"filename": node_file})
                locks.append(filelock)

            # Read the predictions and references
            reader = ArrowReader(path=self.data_dir, info=None)
            self.data = reader.read_files(node_files)

            # Release all of our locks
            for lock in locks:
                lock.release()

    def compute(self,
                predictions=None,
                references=None,
                timeout=120,
                **metrics_kwargs):
        """ Compute the metrics.
        """
        if predictions is not None:
            self.add_batch(predictions=predictions, references=references)
        self.finalize(timeout=timeout)

        self.data.set_format(type=self.info.format)

        predictions = self.data["predictions"]
        references = self.data["references"]
        output = self._compute(predictions=predictions,
                               references=references,
                               **metrics_kwargs)
        return output

    def add_batch(self, predictions=None, references=None, **kwargs):
        """ Add a batch of predictions and references for the metric's stack.
        """
        batch = {"predictions": predictions, "references": references}
        if self.writer is None:
            self._init_writer()
        self.writer.write_batch(batch)

    def add(self, prediction=None, reference=None, **kwargs):
        """ Add one prediction and reference for the metric's stack.
        """
        example = {"predictions": prediction, "references": reference}
        example = self.info.features.encode_example(example)
        if self.writer is None:
            self._init_writer()
        self.writer.write(example)

    def _init_writer(self):
        if self.in_memory:
            self.buf_writer = pa.BufferOutputStream()
            self.writer = ArrowWriter(schema=self.arrow_schema,
                                      stream=self.buf_writer,
                                      writer_batch_size=self.writer_batch_size)
        else:
            self.buf_writer = None
            self.writer = ArrowWriter(schema=self.arrow_schema,
                                      path=self.cache_file_name,
                                      writer_batch_size=self.writer_batch_size)

    def _info(self) -> MetricInfo:
        """Construct the MetricInfo object. See `MetricInfo` for details.

        Warning: This function is only called once and the result is cached for all
        following .info() calls.

        Returns:
            info: (MetricInfo) The metrics information
        """
        raise NotImplementedError

    def _compute(self,
                 predictions=None,
                 references=None,
                 **kwargs) -> Dict[str, Any]:
        """ This method defines the common API for all the metrics in the library """
        raise NotImplementedError
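
A minimal subclass sketch for this Metric base class. The MetricInfo, Features and Value names and fields are assumptions inferred from how self.info is used above; the ExactMatch metric itself is purely illustrative.

# Illustrative subclass of the Metric base class above; field names are assumptions.
class ExactMatch(Metric):
    def _info(self):
        return MetricInfo(
            description="Fraction of predictions equal to their reference (illustrative).",
            citation="",
            inputs_description="predictions: list of str, references: list of str",
            features=Features({"predictions": Value("string"),
                               "references": Value("string")}),
        )

    def _compute(self, predictions=None, references=None, **kwargs):
        matches = sum(p == r for p, r in zip(predictions, references))
        return {"exact_match": matches / len(references)}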
Esempio n. 18
0
 def __init__(self):
     lock = FileLock(lock_file)
     lock.acquire()
Esempio n. 19
0
from filelock import Timeout, FileLock
import sqlite3
import datetime, time

while 1:
    dblock = FileLock("db.lock", timeout=1)
    dblock.acquire()
    try:
        db = sqlite3.connect("./user_gamedata.db3")
        cursor = db.cursor()

        # DELETE timeout
        count = 0
        cursor.execute("SELECT * FROM lineupPool")
        results = cursor.fetchall()
        for record in results:
            if round(time.time()) - round(record[1]) > 15:
                count += 1
                cursor.execute("DELETE FROM lineupPool WHERE uid='" +
                               str(record[0]) + "'")
                db.commit()
        db.close()
        # Print message
        print(
            str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + ": " +
            str(count) + " user(s) deleted")

    except Exception as err:
        print("ERROR occurred, please check: {}".format(err))

    finally:
        dblock.release()
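
As an alternative sketch (not part of the original script), the same cleanup loop can rely on FileLock's context-manager support so the lock is always released even when the database work raises; the column positions are assumed to match the original (uid first, timestamp second).

# Alternative sketch: FileLock as a context manager releases the lock automatically.
from filelock import FileLock
import sqlite3
import time

dblock = FileLock("db.lock", timeout=1)
while True:
    with dblock:
        db = sqlite3.connect("./user_gamedata.db3")
        try:
            cursor = db.cursor()
            cursor.execute("SELECT * FROM lineupPool")
            for record in cursor.fetchall():
                if round(time.time()) - round(record[1]) > 15:
                    # parameterized delete; uid is assumed to be the first column
                    cursor.execute("DELETE FROM lineupPool WHERE uid=?", (record[0],))
            db.commit()
        finally:
            db.close()
    time.sleep(1)  # assumed pause between sweeps; the original loops back immediately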
Esempio n. 20
0
def split_to_batches(l, n):
    """Yield successive n-sized batches from list l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


call_dir = Path.cwd()
root_dir = Path(sys.path[0]).absolute()
queue_dir = Path(queue_dir).absolute()
out_dir = Path(out_dir).absolute()
done_dir = Path(done_dir).absolute()
temp_dir = Path(root_dir / temp_dir).absolute()
train_video = train_video != "False"
call_dir, root_dir, queue_dir, out_dir, done_dir, temp_dir, train_video

lock = FileLock(root_dir / "gpu.lock")
"""### Chunk Loop"""

with lock.acquire(1):

    ###### loop
    # build a list from a snapshot of the glob

    #### if queue_dir is a file, chunk = [queue]

    if queue_dir.is_dir():
        queue = sorted(list(queue_dir.glob("*")))
    else:
        queue = [queue_dir]

    for chunk in split_to_batches(queue, queue_batch):
        print("QUEUE CHUNCK:", chunk)
        print("net_batch", net_batch, "or len(chunk) if less")
Esempio n. 21
0
class Metric(MetricInfoMixin):
    """A Metrics is the base class and common API for all metrics.

    Args:
        config_name (``str``): This is used to define a hash specific to a metrics computation script and prevents the metric's data
            from being overridden when the metric loading script is modified.
        keep_in_memory (``bool``): keep all predictions and references in memory. Not possible in distributed settings.
        cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
            The data directory should be located on a shared file-system in distributed setups.
        num_process (``int``): specify the total number of nodes in a distributed settings.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        seed (Optional ``int``): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
        experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
        timeout (``Union[int, float]``): Timeout in second for distributed setting synchronization.
    """

    def __init__(
        self,
        config_name: Optional[str] = None,
        keep_in_memory: bool = False,
        cache_dir: Optional[str] = None,
        num_process: int = 1,
        process_id: int = 0,
        seed: Optional[int] = None,
        experiment_id: Optional[str] = None,
        max_concurrent_cache_files: int = 10000,
        timeout: Union[int, float] = 100,
        **kwargs,
    ):
        # prepare info
        self.config_name = config_name or "default"
        info = self._info()
        info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
        info.config_name = self.config_name
        info.experiment_id = experiment_id or "default_experiment"
        MetricInfoMixin.__init__(self, info)  # For easy access on low level

        # Safety checks on num_process and process_id
        assert isinstance(process_id, int) and process_id >= 0, "'process_id' should be a non-negative integer"
        assert (
            isinstance(num_process, int) and num_process > process_id
        ), "'num_process' should be a number greater than process_id"
        assert (
            num_process == 1 or not keep_in_memory
        ), "Using 'keep_in_memory' is not possible in distributed setting (num_process > 1)."
        self.num_process = num_process
        self.process_id = process_id
        self.max_concurrent_cache_files = max_concurrent_cache_files

        self.keep_in_memory = keep_in_memory
        self._data_dir_root = os.path.expanduser(cache_dir or HF_METRICS_CACHE)
        self.data_dir = self._build_data_dir()
        self.seed: int = seed or np.random.get_state()[1][0]
        self.timeout: Union[int, float] = timeout

        # Update 'compute' and 'add' docstring
        # methods need to be copied otherwise it changes the docstrings of every instance
        self.compute = types.MethodType(copyfunc(self.compute), self)
        self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
        self.add = types.MethodType(copyfunc(self.add), self)
        self.compute.__func__.__doc__ += self.info.inputs_description
        self.add_batch.__func__.__doc__ += self.info.inputs_description
        self.add.__func__.__doc__ += self.info.inputs_description

        # self.arrow_schema = pa.schema(field for field in self.info.features.type)
        self.buf_writer = None
        self.writer = None
        self.writer_batch_size = None
        self.data = None

        # This is the cache file we store our predictions/references in
        # Keep it None for now so we can (cloud)pickle the object
        self.cache_file_name = None
        self.filelock = None
        self.rendez_vous_lock = None

        # This is all the cache files on which we have a lock when we are in a distributed setting
        self.file_paths = None
        self.filelocks = None

    def __len__(self):
        """Return the number of examples (predictions or predictions/references pair)
        currently stored in the metric's cache.
        """
        return 0 if self.writer is None else len(self.writer)

    def __repr__(self):
        return (
            f'Metric(name: "{self.name}", features: {self.features}, '
            f'usage: """{self.inputs_description}""", '
            f"stored examples: {len(self)})"
        )

    def _build_data_dir(self):
        """Path of this metric in cache_dir:
        Will be:
            self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
        If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
        """
        builder_data_dir = self._data_dir_root
        builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
        os.makedirs(builder_data_dir, exist_ok=True)
        return builder_data_dir

    def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
        """ Create a new cache file. If the default cache file is used, we generated a new hash. """
        file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
        filelock = None
        for i in range(self.max_concurrent_cache_files):
            filelock = FileLock(file_path + ".lock")
            try:
                filelock.acquire(timeout=timeout)
            except Timeout:
                # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
                # We raise an error
                if self.num_process != 1:
                    raise ValueError(
                        f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid colision "
                        f"between distributed metric instances."
                    )
                if i == self.max_concurrent_cache_files - 1:
                    raise ValueError(
                        f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
                        f"You should set a larger value of max_concurrent_cache_files when creating the metric "
                        f"(current value is {self.max_concurrent_cache_files})."
                    )
                # In other cases (allowed to find a new file name and not yet at the max number of attempts) we can try to sample a new hashed name.
                file_uuid = str(uuid.uuid4())
                file_path = os.path.join(
                    self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
                )
            else:
                break

        return file_path, filelock

    def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
        """Get a lock on all the cache files in a distributed setup.
        We wait up to ``timeout`` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
        """
        if self.num_process == 1:
            if self.cache_file_name is None:
                raise ValueError(
                    "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
                    "at least once before calling `compute`."
                )
            file_paths = [self.cache_file_name]
        else:
            file_paths = [
                os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
                for process_id in range(self.num_process)
            ]

        # Let's acquire a lock on each process files to be sure they are finished writing
        filelocks = []
        for process_id, file_path in enumerate(file_paths):
            filelock = FileLock(file_path + ".lock")
            try:
                filelock.acquire(timeout=self.timeout)
            except Timeout:
                raise ValueError(f"Cannot acquire lock on cached file {file_path} for process {process_id}.")
            else:
                filelocks.append(filelock)

        return file_paths, filelocks

    def _check_all_processes_locks(self):
        expected_lock_file_names = [
            os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
            for process_id in range(self.num_process)
        ]
        for expected_lock_file_name in expected_lock_file_names:
            nofilelock = FileFreeLock(expected_lock_file_name)
            try:
                nofilelock.acquire(timeout=self.timeout)
            except Timeout:
                raise ValueError(
                    f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
                )
            else:
                nofilelock.release()

    def _check_rendez_vous(self):
        expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
        nofilelock = FileFreeLock(expected_lock_file_name)
        try:
            nofilelock.acquire(timeout=self.timeout)
        except Timeout:
            raise ValueError(
                f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
            )
        else:
            nofilelock.release()
        lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
        rendez_vous_lock = FileLock(lock_file_name)
        try:
            rendez_vous_lock.acquire(timeout=self.timeout)
        except Timeout:
            raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.")
        else:
            rendez_vous_lock.release()

    def _finalize(self):
        """Close all the writing process and load/gather the data
        from all the nodes if main node or all_process is True.
        """
        if self.writer is not None:
            self.writer.finalize()
        self.writer = None
        if self.filelock is not None:
            self.filelock.release()

        if self.keep_in_memory:
            # Read the predictions and references
            reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
            self.data = Dataset.from_buffer(self.buf_writer.getvalue())

        elif self.process_id == 0:
            # Let's acquire a lock on each node files to be sure they are finished writing
            file_paths, filelocks = self._get_all_cache_files()

            # Read the predictions and references
            try:
                reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
                self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
            except FileNotFoundError:
                raise ValueError(
                    "Error in finalize: another metric instance is already using the local cache file. "
                    "Please specify an experiment_id to avoid colision between distributed metric instances."
                )

            # Store file paths and locks and we will release/delete them after the computation.
            self.file_paths = file_paths
            self.filelocks = filelocks

    def compute(self, *args, **kwargs) -> Optional[dict]:
        """Compute the metrics.

        Args:
            We disallow the usage of positional arguments to prevent mistakes
            `predictions` (Optional list/array/tensor): predictions
            `references` (Optional list/array/tensor): references
            `**kwargs` (Optional other kwargs): will be forwarded to the metrics :func:`_compute` method (see details in the docstring)

        Return:
            Dictionary with the metrics if this metric is run on the main process (process_id == 0)
            None if the metric is not run on the main process (process_id != 0)
        """
        if args:
            raise ValueError("Please call `compute` using keyword arguments.")

        predictions = kwargs.pop("predictions", None)
        references = kwargs.pop("references", None)

        if predictions is not None:
            self.add_batch(predictions=predictions, references=references)
        self._finalize()

        self.cache_file_name = None
        self.filelock = None

        if self.process_id == 0:
            self.data.set_format(type=self.info.format)

            predictions = self.data["predictions"]
            references = self.data["references"]
            with temp_seed(self.seed):
                output = self._compute(predictions=predictions, references=references, **kwargs)

            if self.buf_writer is not None:
                self.buf_writer = None
                del self.data
                self.data = None
            else:
                # Release locks and delete all the cache files
                for filelock, file_path in zip(self.filelocks, self.file_paths):
                    logger.info(f"Removing {file_path}")
                    del self.data
                    self.data = None
                    del self.writer
                    self.writer = None
                    os.remove(file_path)
                    filelock.release()

            return output
        else:
            return None

    def add_batch(self, *, predictions=None, references=None):
        """
        Add a batch of predictions and references for the metric's stack.
        """
        batch = {"predictions": predictions, "references": references}
        batch = self.info.features.encode_batch(batch)
        if self.writer is None:
            self._init_writer()
        try:
            self.writer.write_batch(batch)
        except pa.ArrowInvalid:
            raise ValueError(
                f"Predictions and/or references don't match the expected format.\n"
                f"Expected format: {self.features},\n"
                f"Input predictions: {predictions},\n"
                f"Input references: {references}"
            )

    def add(self, *, prediction=None, reference=None):
        """Add one prediction and reference for the metric's stack."""
        example = {"predictions": prediction, "references": reference}
        example = self.info.features.encode_example(example)
        if self.writer is None:
            self._init_writer()
        try:
            self.writer.write(example)
        except pa.ArrowInvalid:
            raise ValueError(
                f"Prediction and/or reference don't match the expected format.\n"
                f"Expected format: {self.features},\n"
                f"Input predictions: {prediction},\n"
                f"Input references: {reference}"
            )

    def _init_writer(self, timeout=1):
        if self.num_process > 1:
            if self.process_id == 0:
                file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
                self.rendez_vous_lock = FileLock(file_path)
                try:
                    self.rendez_vous_lock.acquire(timeout=timeout)
                except TimeoutError:
                    raise ValueError(
                        f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid colision "
                        f"between distributed metric instances."
                    )

        if self.keep_in_memory:
            self.buf_writer = pa.BufferOutputStream()
            self.writer = ArrowWriter(
                features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
            )
        else:
            self.buf_writer = None

            # Get cache file name and lock it
            if self.cache_file_name is None or self.filelock is None:
                cache_file_name, filelock = self._create_cache_file()  # get ready
                self.cache_file_name = cache_file_name
                self.filelock = filelock

            self.writer = ArrowWriter(
                features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
            )
        # Set up the rendez-vous here if we are in a distributed setting
        if self.num_process > 1:
            if self.process_id == 0:
                self._check_all_processes_locks()  # wait for everyone to be ready
                self.rendez_vous_lock.release()  # let everyone go
            else:
                self._check_rendez_vous()  # wait for master to be ready and to let everyone go

    def _info(self) -> MetricInfo:
        """Construct the MetricInfo object. See `MetricInfo` for details.

        Warning: This function is only called once and the result is cached for all
        following .info() calls.

        Returns:
            info: (MetricInfo) The metrics information
        """
        raise NotImplementedError

    def download_and_prepare(
        self,
        download_config: Optional[DownloadConfig] = None,
        dl_manager: Optional[DownloadManager] = None,
        **download_and_prepare_kwargs,
    ):
        """Downloads and prepares dataset for reading.

        Args:
            download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
            dl_manager (Optional ``datasets.DownloadManager``): specific Download Manager to use
        """
        if dl_manager is None:
            if download_config is None:
                download_config = DownloadConfig()
                download_config.cache_dir = os.path.join(self.data_dir, "downloads")
                download_config.force_download = False

            dl_manager = DownloadManager(
                dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
            )

        self._download_and_prepare(dl_manager)

    def _download_and_prepare(self, dl_manager):
        """Downloads and prepares resources for the metric.

        This is the internal implementation to overwrite called when user calls
        `download_and_prepare`. It should download all required resources for the metric.

        Args:
            dl_manager: (DownloadManager) `DownloadManager` used to download and cache
                data.
        """
        return None

    def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
        """ This method defines the common API for all the metrics in the library """
        raise NotImplementedError

    def __del__(self):
        if self.filelock is not None:
            self.filelock.release()
        if self.rendez_vous_lock is not None:
            self.rendez_vous_lock.release()
        del self.writer
        del self.data
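
A minimal subclass and usage sketch for this Metric variant, assuming the datasets-style MetricInfo, Features and Value objects implied by _init_writer() and add_batch(); the Accuracy metric and the values in the commented usage are illustrative only.

# Illustrative subclass of the Metric class above; MetricInfo fields are assumptions.
class Accuracy(Metric):
    def _info(self):
        return MetricInfo(
            description="Simple accuracy (illustrative).",
            citation="",
            inputs_description="predictions and references are lists of class labels.",
            features=Features({"predictions": Value("int64"),
                               "references": Value("int64")}),
        )

    def _compute(self, *, predictions=None, references=None, **kwargs):
        correct = sum(int(p == r) for p, r in zip(predictions, references))
        return {"accuracy": correct / len(references)}


# Hypothetical single-process usage:
# metric = Accuracy(experiment_id="demo")
# metric.add_batch(predictions=[0, 1, 1], references=[0, 1, 0])
# print(metric.compute())  # e.g. {'accuracy': 0.666...}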
Esempio n. 22
0
 def handle_client(self, conn, addr):
     """receive request from the client and send response"""
     self.print_debug("Connection established with client " + addr[0] + ":" + str(addr[1]) + " at " +
                      time.strftime("%a, %d %b %Y %I:%M:%S %p %Z", time.gmtime()))
     try:
         while True:
             _receive_data = []
             _tmp = conn.recv(2048)
             if _tmp:
                 _tmp = _tmp.decode("utf-8")  # convert to text string
                 _receive_data.append(_tmp)  # append to receive array
             else:
                 break
             str_received_data = ''
             for data in _receive_data:
                 str_received_data += data  # convert received array to string
             print(str_received_data)
             first_line = str_received_data.split("\r\n", 1)
             req_type = first_line[0].split(" ")[0]
             uri = first_line[0].split(" ")[1]
             headers = {}
             header_body = str_received_data.split("\r\n\r\n")
             count = 0
             for line in header_body[0].split("\r\n"):
                 if count != 0:
                     headers[line.split(":")[0].strip()] = line.split(":")[1].strip()
                 else:
                     count = count + 1
             count = -1
             body = ''
             if len(header_body) > 1:
                 if header_body[0].find("Content-Length:") > -1:
                     last = header_body[0].find("\r\n", header_body[0].find("Content-Length:"))
                     last -= 1
                     count = int(header_body[0][header_body[0].find("Content-Length:") + 16:last])
                     body = ''
                     for line in header_body[1]:
                         if count > 0:
                             body += line
                         count -= 1
             http_object = HTTPObject(req_type, uri, headers, body)
             # Sending code
             message = 'HTTP/1.1 '
             if ".." in uri:
                 self.print_debug("Access Denied " + uri)
                 message += "403 Forbidden" + "\r\n"
                 message += "Content-type: text/plain" + "\r\n"
                 message += "Content-Disposition: inline" + "\r\n\r\n"
             else:
                 if HTTPObject.get_req_type(http_object).lower() == "get":
                     try:
                         if HTTPObject.get_uri(http_object) == "/":
                             message += "200 OK" + "\r\n"
                             message += "Date: " + formatdate(timeval=None, localtime=False, usegmt=True) + "\r\n"
                             message += "Server:" + socket.gethostname() + "\r\n"
                             self.print_debug("GET DIR " + os.getcwd().replace("\\", "/") + "/" +
                                              getattr(self._server_obj, '_path') + HTTPObject.get_uri(http_object))
                             working_dir = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                           + HTTPObject.get_uri(http_object)
                             list_files = os.listdir(working_dir)
                             str_files = ''
                             for file in list_files:
                                 str_files += file + "\r\n"
                             message += "Content-Length: " + str(len("{\r\n" + str_files + "}")) + "\r\n"
                             message += "Content-Type: text/directory" + "\r\n"
                             if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                     message += "Content-Disposition: attachment/output" + "\r\n"
                                 else:
                                     message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                 ["Content-Disposition"] + "\r\n"
                             elif "inline" in HTTPObject.get_uri(http_object):
                                 message += "Content-Disposition: inline" + "\r\n"
                             else:
                                 message += "Content-Disposition: attachment/output" + "\r\n"
                             message += "\r\n"
                             message += "{\r\n" + str_files + "}"
                         else:
                             working_file = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                            + HTTPObject.get_uri(http_object)
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Type"] == "text/plain":
                                     if not working_file.endswith(".txt"):
                                         working_file += ".txt"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/xml":
                                     if not working_file.endswith(".xml"):
                                         working_file += ".xml"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "application/json":
                                     if not working_file.endswith(".json"):
                                         working_file += ".json"
                             self.print_debug("GET File " + working_file)
                             if not os.path.isfile(working_file):
                                 message += "404 Not Found" + "\r\n"
                                 message += "Date: " + formatdate(timeval=None, localtime=False,
                                                                  usegmt=True) + "\r\n"
                                 message += "Server: " + socket.gethostname() + "\r\n"
                                 if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                     if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                         message += "Content-Disposition: attachment/output" + "\r\n"
                                     else:
                                         message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                     ["Content-Disposition"] + "\r\n"
                                 elif "inline" in HTTPObject.get_uri(http_object):
                                     message += "Content-Disposition: inline" + "\r\n"
                                 else:
                                     message += "Content-Disposition: inline" + "\r\n"
                                 message += "\r\n"
                             else:
                                 message += "200 OK" + "\r\n"
                                 message += "Date: " + formatdate(timeval=None, localtime=False,
                                                                  usegmt=True) + "\r\n"
                                 message += "Server: " + socket.gethostname() + "\r\n"
                                  with open(working_file, 'r') as fr:
                                      file_data = fr.read()
                                 if "Content-Type" in HTTPObject.get_headers(http_object):
                                     message += "Content-Type: " + HTTPObject.get_headers(http_object)[
                                                 "Content-Type"] + "\r\n"
                                 else:
                                     if working_file.endswith(".txt"):
                                         message += "Content-Type: text/plain" + "\r\n"
                                     elif working_file.endswith(".html"):
                                         message += "Content-Type: text/html" + "\r\n"
                                     elif working_file.endswith(".xml"):
                                         message += "Content-Type: text/xml" + "\r\n"
                                     elif working_file.endswith(".json"):
                                         message += "Content-Type: application/json" + "\r\n"
                                     else:
                                         message += "Content-Type: text/plain" + "\r\n"
                                 message += "Content-Length: " + str(len(file_data)) + "\r\n"
                                 if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                     if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                         message += "Content-Disposition: attachment/output" + "\r\n"
                                     else:
                                         message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                     ["Content-Disposition"] + "\r\n"
                                 elif "inline" in HTTPObject.get_uri(http_object):
                                     message += "Content-Disposition: inline" + "\r\n"
                                 else:
                                     message += "Content-Disposition: inline" + "\r\n"
                                 message += "\r\n"
                                 message += file_data
                     except OSError as msg:
                         self.print_debug(msg)
                         message = "HTTP/1.1 400 Bad Request\r\n\r\n"
                         message += msg.strerror
                 elif HTTPObject.get_req_type(http_object).lower() == "post":
                     try:
                         if HTTPObject.get_uri(http_object) != "":
                             working_file = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                            + HTTPObject.get_uri(http_object)
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Type"] == "text/plain":
                                     if not working_file.endswith(".txt"):
                                         working_file += ".txt"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/xml":
                                     if not working_file.endswith(".xml"):
                                         working_file += ".xml"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "application/json":
                                     if not working_file.endswith(".json"):
                                         working_file += ".json"
                             self.print_debug("POST File " + working_file)
                             path = pathlib.Path(working_file)
                             path.parent.mkdir(parents=True, exist_ok=True)
                             lock_path = working_file + ".lock"
                             my_lock = FileLock(lock_path, timeout=2)
                             my_lock.acquire()
                             try:
                                 open(working_file, "a").write(HTTPObject.get_data(http_object) + "\n")
                             finally:
                                 my_lock.release()
                             message += "200 OK" + "\r\n"
                             message += "Date: " + formatdate(timeval=None, localtime=False, usegmt=True) + "\r\n"
                             message += "Server: " + socket.gethostname() + "\r\n"
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 message += "Content-Type: " + HTTPObject.get_headers(http_object)[
                                     "Content-Type"] + "\r\n"
                             else:
                                 if working_file.endswith(".txt"):
                                     message += "Content-Type: text/plain" + "\r\n"
                                 elif working_file.endswith(".html"):
                                     message += "Content-Type: text/html" + "\r\n"
                                 elif working_file.endswith(".json"):
                                     message += "Content-Type: application/json" + "\r\n"
                                 elif working_file.endswith(".xml"):
                                     message += "Content-Type: text/xml" + "\r\n"
                                 else:
                                     message += "Content-Type: text/plain" + "\r\n"
                             message += "Content-Length: " + str(len(HTTPObject.get_data(http_object))) + "\r\n"
                             if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                     message += "Content-Disposition: attachment/output" + "\r\n"
                                 else:
                                     message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                         ["Content-Disposition"] + "\r\n"
                             elif "inline" in HTTPObject.get_uri(http_object):
                                 message += "Content-Disposition: inline" + "\r\n"
                             else:
                                 message += "Content-Disposition: inline" + "\r\n"
                             message += "\r\n"
                             message += HTTPObject.get_data(http_object)
                     except OSError as msg:
                         self.print_debug(msg)
                         message = "HTTP/1.1 400 Bad Request\r\n\r\n"
                         message += msg.strerror
             print("Sending response message to client " + addr[0])
             print(message)
             conn.sendall(bytes(message, 'utf-8'))
             self.print_debug("Connection closed with client " + addr[0] + ":" + str(addr[1]) + " at " +
                              time.strftime("%a, %d %b %Y %I:%M:%S %p %Z", time.gmtime()))
             break
     finally:
         conn.close()
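
A handler like the one above can be exercised with the standard library's http.client; the host, port, and file name below are assumptions rather than part of the example:

import http.client

# GET the directory listing served at the root path.
conn = http.client.HTTPConnection("localhost", 8080)    # assumed host/port
conn.request("GET", "/")
print(conn.getresponse().read().decode("utf-8"))
conn.close()

# POST a line of text to a hypothetical file under the served directory.
conn = http.client.HTTPConnection("localhost", 8080)
conn.request("POST", "/notes.txt", body="hello",
             headers={"Content-Type": "text/plain"})
print(conn.getresponse().status)
conn.close()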
Esempio n. 23
0
def main():
    parser = argparse.ArgumentParser(
        description='project management.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[get_base_parser(tool_version=__version__)])

    parser.add_argument('-b',
                        '--base',
                        default="/var/opengrok",
                        help='OpenGrok instance base directory')
    parser.add_argument('-R',
                        '--roconfig',
                        help='OpenGrok read-only configuration file')
    parser.add_argument('-U',
                        '--uri',
                        default='http://localhost:8080/source',
                        help='URI of the webapp with context path')
    parser.add_argument('-c',
                        '--configmerge',
                        help='path to the ConfigMerge binary')
    parser.add_argument('--java',
                        help='Path to java binary '
                        '(needed for config merge program)')
    parser.add_argument('-j', '--jar', help='Path to jar archive to run')
    parser.add_argument('-u',
                        '--upload',
                        action='store_true',
                        help='Upload configuration at the end')
    parser.add_argument('-n',
                        '--noop',
                        action='store_true',
                        default=False,
                        help='Do not run any commands or modify any config'
                        ', just report. Usually implies '
                        'the --debug option.')
    parser.add_argument('-N',
                        '--nosourcedelete',
                        action='store_true',
                        default=False,
                        help='Do not delete source code when '
                        'deleting a project')
    add_http_headers(parser)
    parser.add_argument(
        '--api_timeout',
        type=int,
        help='Set response timeout in seconds for RESTful API calls')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-a',
                       '--add',
                       metavar='project',
                       nargs='+',
                       help='Add project (assumes its source is available '
                       'under source root)')
    group.add_argument('-d',
                       '--delete',
                       metavar='project',
                       nargs='+',
                       help='Delete project and its data and source code')
    group.add_argument('-r',
                       '--refresh',
                       action='store_true',
                       help='Refresh configuration. If read-only '
                       'configuration is supplied, it is merged '
                       'with current '
                       'configuration.')

    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    doit = not args.noop
    configmerge = None

    #
    # Setup logger as a first thing after parsing arguments so that it can be
    # used through the rest of the program.
    #
    logger = get_console_logger(get_class_basename(), args.loglevel)

    headers = get_headers(args.header)

    if args.nosourcedelete and not args.delete:
        logger.error("The no source delete option is only valid for delete")
        sys.exit(FAILURE_EXITVAL)

    # Set the base directory
    if args.base:
        if path.isdir(args.base):
            logger.debug("Using {} as instance base".format(args.base))
        else:
            logger.error(
                "Not a directory: {}\n"
                "Set the base directory with the --base option.".format(
                    args.base))
            sys.exit(FAILURE_EXITVAL)

    # If read-only configuration file is specified, this means read-only
    # configuration will need to be merged with active webapp configuration.
    # This requires config merge tool to be run so couple of other things
    # need to be checked.
    if args.roconfig:
        if path.isfile(args.roconfig):
            logger.debug("Using {} as read-only config".format(args.roconfig))
        else:
            logger.error("File {} does not exist".format(args.roconfig))
            sys.exit(FAILURE_EXITVAL)

        configmerge_file = get_command(logger, args.configmerge,
                                       "opengrok-config-merge")
        if configmerge_file is None:
            logger.error("Use the --configmerge option to specify the path to"
                         " the config merge script")
            sys.exit(FAILURE_EXITVAL)

        configmerge = [configmerge_file]
        if args.loglevel:
            configmerge.append('-l')
            configmerge.append(str(args.loglevel))

        if args.jar is None:
            logger.error('jar file needed for config merge tool, '
                         'use --jar to specify one')
            sys.exit(FAILURE_EXITVAL)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        sys.exit(FAILURE_EXITVAL)
    logger.debug("web application URI = {}".format(uri))

    lock = FileLock(
        path.join(tempfile.gettempdir(),
                  path.basename(sys.argv[0]) + ".lock"))
    try:
        with lock.acquire(timeout=0):
            if args.add:
                for proj in args.add:
                    project_add(doit=doit,
                                logger=logger,
                                project=proj,
                                uri=uri,
                                headers=headers,
                                timeout=args.api_timeout)

                config_refresh(doit=doit,
                               logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java,
                               headers=headers,
                               timeout=args.api_timeout)
            elif args.delete:
                for proj in args.delete:
                    project_delete(logger=logger,
                                   project=proj,
                                   uri=uri,
                                   doit=doit,
                                   deletesource=not args.nosourcedelete,
                                   headers=headers,
                                   timeout=args.api_timeout)

                config_refresh(doit=doit,
                               logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java,
                               headers=headers,
                               timeout=args.api_timeout)
            elif args.refresh:
                config_refresh(doit=doit,
                               logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java,
                               headers=headers,
                               timeout=args.api_timeout)
            else:
                parser.print_help()
                sys.exit(FAILURE_EXITVAL)

            if args.upload:
                main_config = get_config_file(basedir=args.base)
                if path.isfile(main_config):
                    if doit:
                        with io.open(main_config, mode='r',
                                     encoding="utf-8") as config_file:
                            config_data = config_file.read().encode("utf-8")
                            if not set_configuration(logger,
                                                     config_data,
                                                     uri,
                                                     headers=headers,
                                                     timeout=args.api_timeout):
                                sys.exit(FAILURE_EXITVAL)
                else:
                    logger.error("file {} does not exist".format(main_config))
                    sys.exit(FAILURE_EXITVAL)
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(FAILURE_EXITVAL)
Esempio n. 24
0
class Db(object):
    """
        Basic file storage for a JSON object.
    """
    def where_not_in(self, column, *values):
        """
        Selects results which do not match the given column/values expression.

        Args:
            column (str): The named field to test against.

            values (str): Values to search for.  A record will not be returned if the field named
                in *column* is contained inside of the list of values given.
        """
        return [x for x in self.data if x[column] not in values]

    def all_jobs(self):
        "Retrieve all records."
        return self.data

    def values(self):
        "Synonym for #all_jobs."
        return self.data

    def find_by_remote_job_id(self, job_id):
        """
        Finds a record by the id number for that job on the remote cluster.

        Args:
            job_id(str): the job id for this job as scheduled or running on the remote.

        Returns:
            A :class:`dict` object containing various attributes of the job on the local and the remote::

                {  
                    local_id: '123123',
                    local_state: 3,
                    local_wd: '/var/sge_working_dir',
                    remote_id: '1234324',
                    remote_state: 2,
                    remote_wd: '/var/remote_working_dir',
                    last_checked: '2017-12-27 16:35:30.898984'
                }
        
        """
        return next((x for x in self.values() if x[R_ID] == job_id), None)

    def find_by_local_job_id(self, job_id):

        return next((x for x in self.values() if x[L_ID] == job_id), None)

    def insert(self, local_id, local_state, local_wd, remote_id, remote_state,
               remote_wd):
        self.data.append(
            dict(local_id=local_id,
                 local_state=local_state,
                 local_wd=local_wd,
                 remote_id=remote_id,
                 remote_state=remote_state,
                 remote_wd=remote_wd,
                 last_checked=str(datetime.now())))

    def update(self, job):
        job['last_checked'] = str(datetime.now())

    def delete(self, job):
        return self.data.remove(job)

    def __init__(self):
        self.lock = FileLock(LOCKFILE)
        self.data = None

    def save(self):
        with open(DBFILE, 'w') as file:
            file.write(dumps(self.data))

    def open(self):
        self.lock.acquire(timeout=2)
        with open(DBFILE) as file:
            self.data = loads(file.read())

    def close(self):
        self.save()
        self.data = None
        self.lock.release()

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, x, y, z):
        "Exit method for resource"
        self.close()
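
A minimal usage sketch for the class above, assuming LOCKFILE and DBFILE point to writable paths and DBFILE already holds a JSON array; the job values are placeholders modeled on the docstring:

with Db() as db:
    db.insert(local_id='123123', local_state=3, local_wd='/var/sge_working_dir',
              remote_id='1234324', remote_state=2, remote_wd='/var/remote_working_dir')
    job = db.find_by_local_job_id('123123')
    if job is not None:
        db.update(job)
# Leaving the with-block calls close(), which saves the data and releases the file lock.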
Esempio n. 25
0
    def _instance_iterator(self, file_path: str) -> Iterable[Instance]:
        cache_file: Optional[str] = None
        if self._cache_directory:
            cache_file = self._get_cache_location_for_file_path(file_path)

        if cache_file is not None and os.path.exists(cache_file):
            cache_file_lock = FileLock(cache_file + ".lock",
                                       timeout=self.CACHE_FILE_LOCK_TIMEOUT)
            try:
                cache_file_lock.acquire()
                # We make an assumption here that if we can obtain the lock, no one will
                # be trying to write to the file anymore, so it should be safe to release the lock
                # before reading so that other processes can also read from it.
                cache_file_lock.release()
                logger.info("Reading instances from cache %s", cache_file)
                with open(cache_file) as data_file:
                    yield from self._multi_worker_islice(
                        data_file, transform=self.deserialize_instance)
            except Timeout:
                logger.warning(
                    "Failed to acquire lock on dataset cache file within %d seconds. "
                    "Cannot use cache to read instances.",
                    self.CACHE_FILE_LOCK_TIMEOUT,
                )
                yield from self._multi_worker_islice(self._read(file_path),
                                                     ensure_lazy=True)
        elif cache_file is not None and not os.path.exists(cache_file):
            instances = self._multi_worker_islice(self._read(file_path),
                                                  ensure_lazy=True)
            # The cache file doesn't exist so we'll try writing to it.
            if self.max_instances is not None:
                # But we don't write to the cache when max_instances is specified.
                logger.warning(
                    "Skipping writing to data cache since max_instances was specified."
                )
                yield from instances
            elif util.is_distributed() or (get_worker_info()
                                           and get_worker_info().num_workers):
                # We also shouldn't write to the cache if there's more than one process loading
                # instances since each worker only receives a partial share of the instances.
                logger.warning(
                    "Can't cache data instances when there are multiple processes loading data"
                )
                yield from instances
            else:
                try:
                    with FileLock(cache_file + ".lock",
                                  timeout=self.CACHE_FILE_LOCK_TIMEOUT):
                        with CacheFile(cache_file, mode="w+") as cache_handle:
                            logger.info("Caching instances to temp file %s",
                                        cache_handle.name)
                            for instance in instances:
                                cache_handle.write(
                                    self.serialize_instance(instance) + "\n")
                                yield instance
                except Timeout:
                    logger.warning(
                        "Failed to acquire lock on dataset cache file within %d seconds. "
                        "Cannot write to cache.",
                        self.CACHE_FILE_LOCK_TIMEOUT,
                    )
                    yield from instances
        else:
            # No cache.
            yield from self._multi_worker_islice(self._read(file_path),
                                                 ensure_lazy=True)
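
The acquire-then-release-before-reading pattern used for the cache file above can be written out in isolation; the file names here are hypothetical:

from filelock import FileLock, Timeout

read_lock = FileLock("instances.cache.lock", timeout=10)
try:
    read_lock.acquire()
    # Holding the lock means no writer is active; release it right away so
    # other readers are not serialized behind this one.
    read_lock.release()
    with open("instances.cache") as cache:
        lines = cache.readlines()
except Timeout:
    lines = None   # could not rule out a concurrent writer, so skip the cache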
Esempio n. 26
0
def main():
    parser = argparse.ArgumentParser(
        description='Manage parallel workers.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[get_base_parser(tool_version=__version__)])
    parser.add_argument('-w',
                        '--workers',
                        default=multiprocessing.cpu_count(),
                        help='Number of worker processes',
                        type=int)

    # There can be only one way how to supply list of projects to process.
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-d', '--directory', help='Directory to process')
    group1.add_argument('-P',
                        '--project',
                        nargs='*',
                        help='project(s) to process')

    parser.add_argument('-I',
                        '--indexed',
                        action='store_true',
                        help='Sync indexed projects only')
    parser.add_argument('-i',
                        '--ignore_errors',
                        nargs='*',
                        help='ignore errors from these projects')
    parser.add_argument('--ignore_project',
                        nargs='+',
                        help='do not process given project(s)')
    parser.add_argument('-c',
                        '--config',
                        required=True,
                        help='config file in JSON/YAML format')
    parser.add_argument('-U',
                        '--uri',
                        default='http://localhost:8080/source',
                        help='URI of the webapp with context path')
    parser.add_argument('-f',
                        '--driveon',
                        action='store_true',
                        default=False,
                        help='continue command sequence processing even '
                        'if one of the commands requests break')
    parser.add_argument('--nolock',
                        action='store_false',
                        default=True,
                        help='do not acquire lock that prevents multiple '
                        'instances from running')
    parser.add_argument('--api_timeout',
                        type=int,
                        default=3,
                        help='Set response timeout in seconds '
                        'for RESTful API calls')
    parser.add_argument(
        '--async_api_timeout',
        type=int,
        default=300,
        help='Set timeout in seconds for asynchronous REST API calls')
    parser.add_argument('--check_config',
                        action='store_true',
                        help='check configuration and exit')
    add_http_headers(parser)

    try:
        args = parser.parse_args()
    except ValueError as e:
        return fatal(e, exit=False)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        return FAILURE_EXITVAL
    logger.debug("web application URI = {}".format(uri))

    # First read and validate configuration file as it is mandatory argument.
    config = read_config(logger, args.config)
    if config is None:
        logger.error("Cannot read config file from {}".format(args.config))
        return FAILURE_EXITVAL

    # Changing working directory to root will avoid problems when running
    # programs via sudo/su. Do this only after the config file was read
    # so that its path can be specified as relative.
    try:
        os.chdir("/")
    except OSError:
        logger.error("cannot change working directory to /", exc_info=True)
        return FAILURE_EXITVAL

    try:
        commands = config["commands"]
    except KeyError:
        logger.error("The config file has to contain key \"commands\"")
        return FAILURE_EXITVAL

    headers = get_headers(args.header)
    config_headers = config.get("headers")
    if config_headers:
        logger.debug(
            "Updating HTTP headers with headers from the configuration: {}".
            format(config_headers))
        headers.update(config_headers)

    directory = args.directory
    if not args.directory and not args.project and not args.indexed:
        # Assume directory, get the source root value from the webapp.
        directory = get_config_value(logger,
                                     'sourceRoot',
                                     uri,
                                     headers=headers,
                                     timeout=args.api_timeout)
        if not directory:
            logger.error("Neither -d nor -P nor -I specified and cannot get "
                         "source root from the webapp")
            return FAILURE_EXITVAL
        else:
            logger.info("Assuming directory: {}".format(directory))

    ignore_errors = []
    if args.ignore_errors:
        ignore_errors = args.ignore_errors
    else:
        try:
            ignore_errors = config["ignore_errors"]
        except KeyError:
            pass
    logger.debug("Ignoring errors from projects: {}".format(ignore_errors))

    dirs_to_process = []
    if args.project:
        dirs_to_process = args.project
        logger.debug("Processing directories: {}".format(dirs_to_process))
    elif args.indexed:
        indexed_projects = list_indexed_projects(logger,
                                                 uri,
                                                 headers=headers,
                                                 timeout=args.api_timeout)
        logger.debug(
            "Processing indexed projects: {}".format(indexed_projects))

        if indexed_projects:
            for line in indexed_projects:
                dirs_to_process.append(line.strip())
        else:
            logger.error("cannot get list of projects")
            return FAILURE_EXITVAL
    else:
        logger.debug("Processing directory {}".format(directory))
        for entry in os.listdir(directory):
            if path.isdir(path.join(directory, entry)):
                dirs_to_process.append(entry)

    ignored_projects = []
    config_ignored_projects = config.get("ignore_projects")
    if config_ignored_projects:
        logger.debug(
            "Updating list of ignored projects list from the configuration: {}"
            .format(config_ignored_projects))
        ignored_projects.extend(config_ignored_projects)

    if args.ignore_project:
        logger.debug(
            "Updating list of ignored projects based on options: {}".format(
                args.ignore_project))
        ignored_projects.extend(args.ignore_project)

    if ignored_projects:
        dirs_to_process = list(set(dirs_to_process) - set(ignored_projects))
        logger.debug("Removing projects: {}".format(ignored_projects))

    logger.debug("directories to process: {}".format(dirs_to_process))

    if args.project and len(args.project) == 1:
        lockfile_name = args.project[0]
    else:
        lockfile_name = os.path.basename(sys.argv[0])

    if args.nolock:
        try:
            r = do_sync(args.loglevel,
                        commands,
                        config.get("cleanup"),
                        dirs_to_process,
                        ignore_errors,
                        uri,
                        args.workers,
                        driveon=args.driveon,
                        http_headers=headers,
                        timeout=args.api_timeout,
                        api_timeout=args.async_api_timeout,
                        check_config=args.check_config)
        except CommandConfigurationException as exc:
            logger.error("Invalid configuration: {}".format(exc))
            return FAILURE_EXITVAL
    else:
        lock = FileLock(
            os.path.join(tempfile.gettempdir(), lockfile_name + ".lock"))
        try:
            with lock.acquire(timeout=0):
                try:
                    r = do_sync(args.loglevel,
                                commands,
                                config.get("cleanup"),
                                dirs_to_process,
                                ignore_errors,
                                uri,
                                args.workers,
                                driveon=args.driveon,
                                http_headers=headers,
                                timeout=args.api_timeout,
                                api_timeout=args.async_api_timeout,
                                check_config=args.check_config)
                except CommandConfigurationException as exc:
                    logger.error("Invalid configuration: {}".format(exc))
                    return FAILURE_EXITVAL
        except Timeout:
            logger.warning("Already running")
            return FAILURE_EXITVAL

    return r
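
The single-instance guard used here and in several of the other examples boils down to a lock file in the system temporary directory; a minimal standalone sketch, with run() standing in for the guarded work:

import os
import sys
import tempfile
from filelock import FileLock, Timeout

def run():
    pass    # placeholder for the actual work

lock = FileLock(os.path.join(tempfile.gettempdir(),
                             os.path.basename(sys.argv[0]) + ".lock"))
try:
    with lock.acquire(timeout=0):   # fail immediately if another instance holds the lock
        run()
except Timeout:
    print("Already running, exiting.", file=sys.stderr)
    sys.exit(1)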
Esempio n. 27
0
class CandleConnector():
    def __init__(self):
        self.lock = FileLock("config.csv.lock")
        # make dict here that stores the amount for each coin
        self.config = "config.csv"
        self.candles = candles.BinaceConnector()

    def readConfig(self):
        self.lock.acquire()
        df = pd.read_csv(self.config,
                         encoding='utf8',
                         delimiter=',',
                         names=[
                             'coin', 'capital', 'starting', 'limit',
                             'currentPrice', 'autobought', 'takeprofit',
                             'updatetime', 'orderid', 'takeProfitAmount',
                             'takeProfitOrder', 'delta'
                         ])
        self.lock.release()
        df.set_index('coin', inplace=True)
        return df

    #get the current config
    def getCoinConfigData(self, coin):
        df = self.readConfig()
        return df.loc[coin]

    #save a new copy of the config
    def setCoinConfigData(self, df):
        self.lock.acquire()
        df.to_csv(self.config, mode='w', header=False, index=True)
        self.lock.release()

    def getAutoBoughtAmount(self, coin):
        return float(self.getCoinConfigData(coin)['autobought'])

    # helper for buying a number of coins at current price
    def orderNumber(self, coin, number):
        return (self.candles.buyMarket(coin, number))

    # gives you a quote for a coin
    def getQuote(self, coin):
        return float(self.candles.getCoinPrice(coin))

    def getBuyPower(self):
        return float(self.candles.getUSD())

    # set an order for an number amount
    def orderAmount(self, coin, amount):
        return (self.candles.order_buy_crypto_by_price(coin, amount))

    # write out to a log file
    def logit(self, message, destination):
        with open(f"testData/{destination}.txt", "a") as f:
            f.write(message)
            f.write("\n")

    def saveCoinBuyData(self,
                        coin,
                        price,
                        amount,
                        setcap=None,
                        setupdatetime=180,
                        order="none"):
        df = self.readConfig()
        if setcap is not None:
            df.at[coin, 'capital'] = setcap
        df.at[coin, 'starting'] = price
        df.at[coin, 'autobought'] = amount
        df.at[coin, 'limit'] = price * df.at[coin, 'takeprofit']
        df.at[coin, 'updatetime'] = setupdatetime
        df.at[coin, 'orderid'] = order
        self.setCoinConfigData(df)

    def saveCoinLimitBuyData(self, coin, price, amount, order):
        df = self.readConfig()
        df.at[coin, 'starting'] = price
        df.at[coin, 'takeProfitAmount'] = amount
        df.at[coin, 'takeProfitOrder'] = order
        self.setCoinConfigData(df)

    # check to see how much can be purchased with the current capital
    # then purchase that amount of coins
    def buyForProfit(self, coin, strat=None):
        coinsCapital = self.getCoinConfigData(coin)['capital']
        avalFunds = self.getBuyPower()
        print(
            f"capital {coinsCapital} above funds {avalFunds} is {coinsCapital > avalFunds}"
        )
        if (coinsCapital > avalFunds):
            return 0
        if float(self.getCoinConfigData(coin)['autobought']) > 0:
            return 0

        price = self.getQuote(coin)
        #TODO add logic that allows for multiple strategies that will
        #allow for different allocations of the starting capital
        bought = float(coinsCapital / self.getQuote(coin))
        minOrder = None
        minNot = None
        print(bought)
        #grab the trading rules for the coin
        for filt in (self.candles.getCoinInfo(coin)['filters']):
            if filt['filterType'] == "LOT_SIZE":
                minOrder = float(filt['minQty'])
            if filt['filterType'] == 'MIN_NOTIONAL':
                minNot = float(filt['minNotional'])
        mod = bought % minOrder

        #make sure the amount we are buying is standardized for Binance
        if mod:
            bought = bought - mod

        #this needs to get the precision from the filter

        bought = round(bought,
                       int(self.candles.getCoinInfo(coin)['quotePrecision']))
        print(bought)
        if (bought * price) > minNot:
            order = self.orderNumber(coin, bought)
            self.saveCoinBuyData(coin, price, bought)
            self.logit(f"BUYING {order}", "logger")
            #reset our coin data so we can have a current graph
            file = pathlib.Path(f"testData/{coin}.txt")
            if file.exists():
                os.rename(f"testData/{coin}.txt",
                          f"testData/{coin}{datetime.now()}.txt")
        else:
            self.logit(
                f"Failed to buy {bought}, {coin} due to minNotional of {minNot}",
                "logger")
            bought = None
        return bought, price

    # check to see how much can be purchased with the current capital
    # then purchase that amount of coins
    def buyForLimit(self, coin, strat=None):
        #TODO separate the capital out so we can run both at the same time
        coinsCapital = float(self.getCoinConfigData(coin)['capital'])
        avalFunds = self.getBuyPower()
        print(avalFunds)
        print(coinsCapital)
        if (coinsCapital > avalFunds):
            print("no money")
            return None
        #test here to see if previous order has been filled
        previousOrder = self.getCoinConfigData(coin)['takeProfitOrder']
        print(f"Previous {previousOrder}")
        if previousOrder != "none":
            print("previous order")
            status = self.candles.checkStatus(coin, previousOrder)
            #if previous status has been filled. We need to save the winnings to new capital
            print(f"status {status}")
            if 'FILLED' != status:
                print("previous has not filled yet")
                return None
            print("previous has been filled so let's buy more")

        price = self.getQuote(coin)
        #TODO add logic that allows for multiple strategies that will
        #allow for different allocations of the starting capital
        bought = float(coinsCapital / self.getQuote(coin))
        minOrder = None
        minNot = None
        print(bought)
        #grab the trading rules for the coin
        for filt in (self.candles.getCoinInfo(coin)['filters']):
            if filt['filterType'] == "LOT_SIZE":
                minOrder = float(filt['minQty'])
            if filt['filterType'] == 'MIN_NOTIONAL':
                minNot = float(filt['minNotional'])
        mod = bought % minOrder

        #make sure the amount we are buying is standardized for Binance
        if mod:
            bought = bought - mod

        #this needs to get the precision from the filter
        bought = round(bought,
                       int(self.candles.getCoinInfo(coin)['quotePrecision']))
        print(bought)
        if (bought * price) > minNot:
            order = self.orderNumber(coin, bought)
            self.logit(f"BUYING {order}", "logger")
            #reset our coin data so we can have a current graph
            file = pathlib.Path(f"testData/{coin}.txt")
            if file.exists():
                os.rename(f"testData/{coin}.txt",
                          f"testData/{coin}{datetime.now()}.txt")
            return order
        else:
            self.logit(
                f"Failed to buy {bought}, {coin} due to minNotional of {minNot}",
                "logger")
        return None

    #sell an amount at current price
    def sellNow(self, coin):
        #get the amount the bot bought
        amount = self.getAutoBoughtAmount(coin)
        print(f"we have {amount}")
        if amount > 0:
            print(f"selling")
            # Check whether we currently have an open order and cancel it first.
            orderID = self.getCoinConfigData(coin)['orderid']
            print(f"previous order {orderID}")
            if "none" not in orderID:
                print("cancelOrder")
                self.candles.cancelOrder(coin, orderID)
                time.sleep(.3)
            # self.candles.testOrder(coin, SIDE_SELL, amount)
            sellorder = self.candles.sellMarket(coin, amount)
            orderID = sellorder['clientOrderId']
            # save the data for analysis later and reset the bot coin's config
            self.logit(f"SELLING DUE TO STRAT {sellorder}", "logger")
            sellprice = float(sellorder['fills'][0]['price']) * amount
            print(sellprice)
            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)

    def doMaxProfit(self, coin, action):
        self.logit(f"buysell {action}", "logger")
        self.logit(f"symbol {coin}", "logger")
        if action == 'sell':
            self.sellNow(coin)
        if action == 'buy':
            self.buyForProfit(coin)

    def doTakeProfit(self, coin, action):
        self.logit(f"buysell limit {action}", "logger")
        self.logit(f"symbol limit {coin}", "logger")
        #add sell logic
        if action == 'buy':
            order = self.buyForLimit(coin)
            time.sleep(.5)
            if order is not None:
                price = float(order['fills'][0]['price'])
                limit = price * 1.01
                amount = float(order['fills'][0]['qty'])
                limit_order = self.candles.sellLimit(coin, amount, limit)
                self.saveCoinLimitBuyData(coin, price, limit,
                                          limit_order['clientOrderId'])
        elif action == 'sell':
            previousOrder = self.getCoinConfigData(coin)['takeProfitOrder']
            if previousOrder != "none":
                print("previous order")
                status = self.candles.checkStatus(coin, previousOrder)
                #if previous status has been filled. We need to save the winnings to new capital
                print(f"status {status}")
                if 'FILLED' != status:
                    print("previous has not filled yet so we gonna cancel")
                    self.candles.cancelOrder(coin, previousOrder)
                    time.sleep(.3)
                    self.sellNow(coin)
                    return None
                print("previous has been filled so nothing to do")
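
A hypothetical driver for the class above; it assumes config.csv exists next to the script, that candles.BinaceConnector is configured with valid API credentials, and the symbol is a placeholder:

connector = CandleConnector()
print(connector.getQuote("BTCUSDT"))        # placeholder symbol
connector.doMaxProfit("BTCUSDT", "buy")     # market-buy path
connector.doTakeProfit("BTCUSDT", "sell")   # limit / take-profit path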
Esempio n. 28
0
def main():
    ret = SUCCESS_EXITVAL

    parser = argparse.ArgumentParser(
        description='project mirroring',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[get_base_parser(tool_version=__version__)])

    parser.add_argument('project', nargs='*', default=None)
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='mirror all indexed projects',
                        default=False)
    parser.add_argument('-c',
                        '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('--check_config',
                        action='store_true',
                        help='check configuration and exit')
    parser.add_argument('-U',
                        '--uri',
                        default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b',
                        '--batch',
                        action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-L', '--logdir', help='log directory')
    parser.add_argument('-B',
                        '--backupcount',
                        default=8,
                        type=int,
                        help='how many log files to keep around in batch mode')
    parser.add_argument('-I',
                        '--check-changes',
                        action='store_true',
                        help='Check for changes in the project or its'
                        ' repositories,'
                        ' terminate the processing'
                        ' if no change is found.')
    parser.add_argument(
        '--strip-outgoing',
        action='store_true',
        default=False,
        help='check outgoing changes for each repository of a project, '
        'strip any such changes and remove all project data so that '
        'it can be reindexed from scratch. Supported: Git')
    parser.add_argument('-w',
                        '--workers',
                        default=cpu_count(),
                        type=int,
                        help='Number of worker processes')
    add_http_headers(parser)
    parser.add_argument('--api_timeout',
                        type=int,
                        default=3,
                        help='Set response timeout in seconds '
                        'for RESTful API calls')
    parser.add_argument(
        '--async_api_timeout',
        type=int,
        default=300,
        help='Set timeout in seconds for asynchronous REST API calls')

    try:
        args = parser.parse_args()
    except ValueError as e:
        return fatal(e, False)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            return fatal("Cannot read config file from {}".format(args.config),
                         False)
    else:
        config = {}

    if not check_configuration(config):
        logger.error("Configuration check failed, exiting")
        return 1

    if args.check_config:
        logger.info("Configuration check passed, exiting")
        return 0

    nomirror = os.environ.get(OPENGROK_NO_MIRROR_ENV)
    if nomirror and len(nomirror) > 0:
        logger.debug(
            "skipping mirror based on the {} environment variable".format(
                OPENGROK_NO_MIRROR_ENV))
        return SUCCESS_EXITVAL

    if len(args.project) > 0 and args.all:
        return fatal("Cannot use both project list and -a/--all", False)

    if not args.all and len(args.project) == 0:
        return fatal("Need at least one project or --all", False)

    uri = args.uri
    if not is_web_uri(uri):
        return fatal("Not a URI: {}".format(uri), False)
    logger.debug("web application URI = {}".format(uri))

    headers = get_headers(args.header)

    # Save the source root to avoid querying the web application.
    source_root = get_config_value(logger,
                                   'sourceRoot',
                                   uri,
                                   headers=headers,
                                   timeout=args.api_timeout)
    if not source_root:
        return 1

    logger.debug("Source root = {}".format(source_root))

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    logdir = None
    # Log messages to dedicated log file if running in batch mode.
    if args.batch:
        if args.logdir:
            logdir = args.logdir
        else:
            logdir = config.get(LOGDIR_PROPERTY)
            if not logdir:
                return fatal(
                    "The {} property is required in batch mode".format(
                        LOGDIR_PROPERTY), False)

    projects = args.project
    if len(projects) == 1:
        lockfile = projects[0] + "-mirror"
    else:
        lockfile = os.path.basename(sys.argv[0])

    if args.all:
        projects = list_indexed_projects(logger,
                                         args.uri,
                                         headers=headers,
                                         timeout=args.api_timeout)

    lock = FileLock(os.path.join(tempfile.gettempdir(), lockfile + ".lock"))
    try:
        with lock.acquire(timeout=0):
            with Pool(processes=int(args.workers)) as pool:
                worker_args = []
                for x in projects:
                    worker_args.append([
                        x, logdir, args.loglevel, args.backupcount, config,
                        args.check_changes, args.strip_outgoing, args.uri,
                        source_root, args.batch, headers, args.api_timeout,
                        args.async_api_timeout
                    ])
                try:
                    project_results = pool.map(worker, worker_args, 1)
                except KeyboardInterrupt:
                    return FAILURE_EXITVAL
                else:
                    if any([x == FAILURE_EXITVAL for x in project_results]):
                        ret = FAILURE_EXITVAL
                    if all([x == CONTINUE_EXITVAL for x in project_results]):
                        ret = CONTINUE_EXITVAL
    except Timeout:
        logger.warning("Already running, exiting.")
        return FAILURE_EXITVAL

    logging.shutdown()
    return ret
Esempio n. 29
0
def test_wf_in_actor(workflow_start_regular, tmp_path):
    fail_flag = tmp_path / "fail"
    cnt = tmp_path / "count"
    cnt.write_text(str(0))
    lock_file = tmp_path / "lock"

    @workflow.step
    def start_session():
        if fail_flag.exists():
            raise Exception()
        v = int(cnt.read_text()) + 1
        cnt.write_text(str(v))
        with FileLock(str(lock_file)):
            return "UP"

    @workflow.virtual_actor
    class Session:
        def __init__(self):
            self._session_status = "DOWN"

        @workflow.virtual_actor.readonly
        def get_status(self):
            return self._session_status

        def update_session(self, up):
            (ret, err) = up
            if err is None:
                self._session_status = ret
            else:
                self._session_status = err
            return self._session_status

        def session_start(self):
            step = start_session.step()
            return step

        def session_start_with_status(self):
            self._session_status = "STARTING"
            return self.update_session.step(
                start_session.options(catch_exceptions=True).step())

        def __getstate__(self):
            return self._session_status

        def __setstate__(self, state):
            self._session_status = state

    actor = Session.get_or_create("session_id")
    fail_flag.touch()
    with pytest.raises(Exception):
        actor.session_start.run()
    fail_flag.unlink()
    ray.get(workflow.resume("session_id"))
    # After resume, it'll rerun start_session which will
    # generate 1
    assert cnt.read_text() == "1"
    assert actor.session_start.run() == "UP"
    assert cnt.read_text() == "2"
    assert actor.session_start_with_status.run() == "UP"
    assert cnt.read_text() == "3"

    # Now test a new session.
    actor = Session.get_or_create("session_id")
    fail_flag.touch()
    assert isinstance(actor.session_start_with_status.run(), Exception)
    assert cnt.read_text() == "3"
    lock = FileLock(str(lock_file))
    lock.acquire()
    fail_flag.unlink()
    ret = actor.session_start_with_status.run_async()
    for i in range(0, 60):
        if cnt.read_text() == "4":
            break
        time.sleep(1)
    assert cnt.read_text() == "4"
    # This means when return from session_start_with_status,
    # the session got updated
    assert actor.get_status.run() == "STARTING"
    lock.release()
    assert ray.get(ret) == "UP"
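
The blocking trick in the test above, holding a FileLock so a step cannot finish until the test releases it, looks like this in isolation; the lock path is hypothetical and gated_step would run in a separate process, as Ray runs the workflow step:

from filelock import FileLock

gate = FileLock("/tmp/step.gate.lock")   # hypothetical lock path
gate.acquire()                           # the test side holds the gate

def gated_step():
    # Executed in another process; blocks until gate.release() is called.
    with FileLock("/tmp/step.gate.lock"):
        return "UP"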
Esempio n. 30
0
def main():
    dirs_to_process = []

    parser = argparse.ArgumentParser(
        description='Manage parallel workers.',
        parents=[get_baseparser(tool_version=__version__)])
    parser.add_argument('-w',
                        '--workers',
                        default=multiprocessing.cpu_count(),
                        help='Number of worker processes')

    # There can be only one way how to supply list of projects to process.
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-d', '--directory', help='Directory to process')
    group1.add_argument('-P',
                        '--projects',
                        nargs='*',
                        help='List of projects to process')

    parser.add_argument('-I',
                        '--indexed',
                        action='store_true',
                        help='Sync indexed projects only')
    parser.add_argument('-i',
                        '--ignore_errors',
                        nargs='*',
                        help='ignore errors from these projects')
    parser.add_argument('-c',
                        '--config',
                        required=True,
                        help='config file in JSON/YAML format')
    parser.add_argument('-U',
                        '--uri',
                        default='http://localhost:8080/source',
                        help='URI of the webapp with context path')
    parser.add_argument('-f',
                        '--driveon',
                        action='store_true',
                        default=False,
                        help='continue command sequence processing even '
                        'if one of the commands requests break')
    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        sys.exit(FAILURE_EXITVAL)
    logger.debug("web application URI = {}".format(uri))

    # First read and validate configuration file as it is mandatory argument.
    config = read_config(logger, args.config)
    if config is None:
        logger.error("Cannot read config file from {}".format(args.config))
        sys.exit(FAILURE_EXITVAL)

    # Changing working directory to root will avoid problems when running
    # programs via sudo/su. Do this only after the config file was read
    # so that its path can be specified as relative.
    try:
        os.chdir("/")
    except OSError:
        logger.error("cannot change working directory to /", exc_info=True)
        sys.exit(FAILURE_EXITVAL)

    try:
        commands = config["commands"]
    except KeyError:
        logger.error("The config file has to contain key \"commands\"")
        sys.exit(FAILURE_EXITVAL)

    directory = args.directory
    if not args.directory and not args.projects and not args.indexed:
        # Assume directory, get the source root value from the webapp.
        directory = get_config_value(logger, 'sourceRoot', uri)
        if not directory:
            logger.error("Neither -d or -P or -I specified and cannot get "
                         "source root from the webapp")
            sys.exit(FAILURE_EXITVAL)
        else:
            logger.info("Assuming directory: {}".format(directory))

    ignore_errors = []
    if args.ignore_errors:
        ignore_errors = args.ignore_errors
    else:
        try:
            ignore_errors = config["ignore_errors"]
        except KeyError:
            pass
    logger.debug("Ignored projects: {}".format(ignore_errors))

    lock = FileLock(os.path.join(tempfile.gettempdir(), "opengrok-sync.lock"))
    try:
        with lock.acquire(timeout=0):
            if args.projects:
                dirs_to_process = args.projects
                logger.debug(
                    "Processing directories: {}".format(dirs_to_process))
            elif args.indexed:
                indexed_projects = list_indexed_projects(logger, uri)
                logger.debug(
                    "Processing indexed projects: {}".format(indexed_projects))

                if indexed_projects:
                    for line in indexed_projects:
                        dirs_to_process.append(line.strip())
                else:
                    logger.error("cannot get list of projects")
                    sys.exit(FAILURE_EXITVAL)
            else:
                logger.debug("Processing directory {}".format(directory))
                for entry in os.listdir(directory):
                    if path.isdir(path.join(directory, entry)):
                        dirs_to_process.append(entry)

            logger.debug("to process: {}".format(dirs_to_process))

            cmds_base = []
            for d in dirs_to_process:
                cmd_base = CommandSequenceBase(d, commands, args.loglevel,
                                               config.get("cleanup"),
                                               args.driveon)
                cmds_base.append(cmd_base)

            # Map the commands into pool of workers so they can be processed.
            with Pool(processes=int(args.workers)) as pool:
                try:
                    cmds_base_results = pool.map(worker, cmds_base, 1)
                except KeyboardInterrupt:
                    sys.exit(FAILURE_EXITVAL)
                else:
                    for cmds_base in cmds_base_results:
                        logger.debug(
                            "Checking results of project {}".format(cmds_base))
                        cmds = CommandSequence(cmds_base)
                        cmds.fill(cmds_base.retcodes, cmds_base.outputs,
                                  cmds_base.failed)
                        cmds.check(ignore_errors)
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(FAILURE_EXITVAL)
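
A minimal sketch of just the single-instance guard used above (the same pattern appears in several of the listings here): acquire with timeout=0 and exit if another run already holds the lock. The lock path mirrors the example; the with body stands in for the real sync work.

import os
import sys
import tempfile

from filelock import FileLock, Timeout

lock = FileLock(os.path.join(tempfile.gettempdir(), "opengrok-sync.lock"))
try:
    with lock.acquire(timeout=0):
        pass  # the real sync work goes here
except Timeout:
    print("Already running, exiting.", file=sys.stderr)
    sys.exit(1)
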
Esempio n. 31
0
class WeblateLock:
    """Wrapper around Redis or file based lock."""
    def __init__(
        self,
        lock_path: str,
        scope: str,
        key: int,
        slug: str,
        cache_template: str = "lock:{scope}:{key}",
        file_template: Optional[str] = "{slug}-{scope}.lock",
        timeout: int = 1,
    ):
        self._timeout = timeout
        self._lock_path = lock_path
        self._scope = scope
        self._key = key
        self._slug = slug
        self._depth = 0
        default_cache = caches["default"]
        self.use_redis = isinstance(default_cache, RedisCache)
        if self.use_redis:
            # Prefer Redis locking as it works distributed
            self._lock = Lock(
                default_cache.client.get_client(),
                name=self._format_template(cache_template),
                expire=5,
                auto_renewal=True,
            )
        else:
            # Fall back to file based locking
            self._lock = FileLock(
                os.path.join(lock_path, self._format_template(file_template)),
                timeout=self._timeout,
            )

    def _format_template(self, template: str):
        return template.format(
            scope=self._scope,
            key=self._key,
            slug=self._slug,
        )

    def __enter__(self):
        self._depth += 1
        if self._depth > 1:
            return
        if self.use_redis:
            try:
                if not self._lock.acquire(timeout=self._timeout):
                    raise WeblateLockTimeout()
            except AlreadyAcquired:
                pass
        else:
            # Fall back to file based locking
            try:
                self._lock.acquire()
            except Timeout:
                raise WeblateLockTimeout()

    def __exit__(self, exc_type, exc_value, traceback):
        self._depth -= 1
        if self._depth > 0:
            return
        try:
            self._lock.release()
        except NotAcquired:
            # This can happen when an overloaded server fails to renew the
            # lock before it expires.
            pass

    @property
    def is_locked(self):
        return bool(self._depth)
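
A hypothetical use of the wrapper above; the constructor values are made up for illustration, and whether the Redis lock or the file lock is used depends on Django's default cache backend.

repo_lock = WeblateLock(
    lock_path="/tmp/weblate-locks",   # used only for the file-lock fallback
    scope="repo",
    key=42,
    slug="my-project",
    timeout=10,
)

with repo_lock:          # Redis lock if the default cache is Redis, else a file lock
    with repo_lock:      # re-entrant: the inner enter only bumps the depth counter
        pass

assert not repo_lock.is_locked
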
Esempio n. 32
0
def main():
    dirs_to_process = []

    parser = argparse.ArgumentParser(description='Manage parallel workers.',
                                     parents=[
                                         get_baseparser(
                                             tool_version=__version__)
                                     ])
    parser.add_argument('-w', '--workers', default=multiprocessing.cpu_count(),
                        help='Number of worker processes')

    # There can be only one way how to supply list of projects to process.
    group1 = parser.add_mutually_exclusive_group()
    group1.add_argument('-d', '--directory',
                        help='Directory to process')
    group1.add_argument('-P', '--projects', nargs='*',
                        help='List of projects to process')

    parser.add_argument('-I', '--indexed', action='store_true',
                        help='Sync indexed projects only')
    parser.add_argument('-i', '--ignore_errors', nargs='*',
                        help='ignore errors from these projects')
    parser.add_argument('-c', '--config', required=True,
                        help='config file in JSON format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='URI of the webapp with context path')
    parser.add_argument('-f', '--driveon', action='store_true', default=False,
                        help='continue command sequence processing even '
                        'if one of the commands requests break')
    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        sys.exit(1)
    logger.debug("web application URI = {}".format(uri))

    # First read and validate configuration file as it is mandatory argument.
    config = read_config(logger, args.config)
    if config is None:
        logger.error("Cannot read config file from {}".format(args.config))
        sys.exit(1)

    # Changing working directory to root will avoid problems when running
    # programs via sudo/su. Do this only after the config file was read
    # so that its path can be specified as relative.
    try:
        os.chdir("/")
    except OSError:
        logger.error("cannot change working directory to /",
                     exc_info=True)
        sys.exit(1)

    try:
        commands = config["commands"]
    except KeyError:
        logger.error("The config file has to contain key \"commands\"")
        sys.exit(1)

    directory = args.directory
    if not args.directory and not args.projects and not args.indexed:
        # Assume directory, get the source root value from the webapp.
        directory = get_config_value(logger, 'sourceRoot', uri)
        if not directory:
            logger.error("Neither -d or -P or -I specified and cannot get "
                         "source root from the webapp")
            sys.exit(1)
        else:
            logger.info("Assuming directory: {}".format(directory))

    ignore_errors = []
    if args.ignore_errors:
        ignore_errors = args.ignore_errors
    else:
        try:
            ignore_errors = config["ignore_errors"]
        except KeyError:
            pass
    logger.debug("Ignored projects: {}".format(ignore_errors))

    lock = FileLock(os.path.join(tempfile.gettempdir(),
                                 "opengrok-sync.lock"))
    try:
        with lock.acquire(timeout=0):
            if args.projects:
                dirs_to_process = args.projects
                logger.debug("Processing directories: {}".
                             format(dirs_to_process))
            elif args.indexed:
                indexed_projects = list_indexed_projects(logger, uri)
                logger.debug("Processing indexed projects: {}".
                             format(indexed_projects))

                if indexed_projects:
                    for line in indexed_projects:
                        dirs_to_process.append(line.strip())
                else:
                    logger.error("cannot get list of projects")
                    sys.exit(1)
            else:
                logger.debug("Processing directory {}".format(directory))
                for entry in os.listdir(directory):
                    if path.isdir(path.join(directory, entry)):
                        dirs_to_process.append(entry)

            logger.debug("to process: {}".format(dirs_to_process))

            cmds_base = []
            for d in dirs_to_process:
                cmd_base = CommandSequenceBase(d, commands, args.loglevel,
                                               config.get("cleanup"),
                                               args.driveon)
                cmds_base.append(cmd_base)

            # Map the commands into pool of workers so they can be processed.
            with Pool(processes=int(args.workers)) as pool:
                try:
                    cmds_base_results = pool.map(worker, cmds_base, 1)
                except KeyboardInterrupt:
                    sys.exit(1)
                else:
                    for cmds_base in cmds_base_results:
                        logger.debug("Checking results of project {}".
                                     format(cmds_base))
                        cmds = CommandSequence(cmds_base)
                        cmds.fill(cmds_base.retcodes, cmds_base.outputs,
                                  cmds_base.failed)
                        cmds.check(ignore_errors)
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)
Esempio n. 33
0
conn.close()

prev_price = -1
while(True):
	try:
		lock = FileLock('coinut.lock')
		
		response = urllib.urlopen(url);
		data = json.loads(response.read())
	
		timestamp = long(data['timestamp'])
		price = float(data['tick'])
	
		if price != prev_price:
			if lock.acquire():
				conn = sqlite3.connect('coinut.db')
				c = conn.cursor()
			
				with conn:
					values = (timestamp, price,)
					c.execute("INSERT INTO ticker VALUES (?,?)", values)
	
					print values

					conn.commit()
					
					lock.release()
					
			prev_price = price
	except KeyboardInterrupt:
		break  # assumed handler body; the original listing is truncated here
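
A tidier Python 3 sketch of the polling loop above, holding the lock only around the database write and releasing it even if the insert fails. The URL is a placeholder (the real one is defined before the truncated part of the listing), and the ticker table is assumed to exist already.

import json
import sqlite3
import urllib.request

from filelock import FileLock

url = "https://api.example.com/ticker"        # placeholder for the real feed
lock = FileLock("coinut.lock")

with urllib.request.urlopen(url) as response:
    data = json.loads(response.read())

with lock, sqlite3.connect("coinut.db") as conn:
    conn.execute("INSERT INTO ticker VALUES (?,?)",
                 (int(data['timestamp']), float(data['tick'])))
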
Esempio n. 34
0
class Store(object):
    auto_sync = True
    auto_mem_resync_counter = 0

    @staticmethod
    def is_valid_schema(schema):
        return all(len(x) == 3 and x[1] in DUMPERS for x in schema)

    def __init__(self, db_name, schema, log_level=1):
        """
        schema example = (
            (name, type, len),
            ...
        )

        Supported field types: int, str
        """
        self._log_lvl = log_level
        db_name = os.path.join('db', db_name)

        self.lock = FileLock(db_name)

        if not self.is_valid_schema(schema):
            self.error("Invalid DB schema")
            raise Exception('Schema is bad!')

        all = self.init_mem(db_name, schema)

        self._memory = all
        self._db_name = db_name
        self._schema = schema

    def info(self, msg):
        if self._log_lvl > 3:
            print(msg)

    def error(self, msg):
        if self._log_lvl > 0:
            print(msg)

    def _insert(self, object_dict_):
        assert type(object_dict_) == dict, 'Wow! Not dict!'
        self._memory.append(object_dict_)

    def sync(self):
        self.lock.acquire()
        f = open(self._db_name, 'wb')
        for d in self._memory:
            for name_, type_, len_ in self._schema:
                bytes_bytes = DUMPERS[type_](d[name_], len_)
                f.write(bytes_bytes)
            f.write('\n')
        f.close()
        self.lock.release()
        self._memory = self.init_mem(self._db_name, self._schema)

    def execute(self, row):
        """
        Execute SQL query. Support: select, delete, insert.
        """
        # row = "select where value[1] == 'd' and id >= 0 limit 2"
        # row = "insert into ... values (2, 'awdwa')"
        # row = "delete where ... limit k"
        try:
            method, tail = row.split(' ', 1)
            method = method.lower()
            tail = tail.strip(' ')
            rez = None

            self.info(u'-- SQL {0} {1} --'.format(method, tail))

            if method == 'insert':
                r = re.compile(r'^.*?values?[ ]*(\(.*?\))$', re.M)
                z = r.match(tail)
                if z:
                    rez = [self.insert(*z.groups())]
                    if self.auto_sync:
                        self.sync()

            elif method in ['select', 'delete']:
                r = re.compile(r'^.*?(?:(?:where)[ ]*(.*?))?[ ]*(?:limit[ ]*(\d+))?[ ]*([dD][Ee][ScCs][ckCK])?[ ]*$')
                z = r.match(tail)
                if z:
                    rez = self.__getattribute__('go_go')(method, z.groups())
                else:
                    rez = self.__getattribute__(method)()
            elif method == 'last':
                rez = [self.last()]

            if hasattr(rez, '__len__') and rez.__len__() == 1:
                return rez[0]

            return rez
        except Exception as e:
            self.error("Invalid SQL syntax detected: {0!r} by {1}".format(row, e))
            raise Exception('Invalid SQL syntax!!')

    def go_go(self, method, args):
        return self.__getattribute__(method)(*args)

    def delete(self, where=None, limit=None, desk=None):
        limit = int(limit.strip()) if limit else 0
        where = 'True' if not where else where
        where = self.fix_where(where)

        rez = 0
        del_indexes = []
        l = locals()
        i = 0
        mem = self._memory if not desk else reversed(self._memory)
        for d in mem:
            for name_, type_, len_ in self._schema:
                l[name_] = d[name_]

            st = parser.expr(where)
            is_ok = eval(st.compile())

            if is_ok:
                rez += 1
                del_indexes.append(i)

            i += 1

            if limit and rez >= limit:
                break

        z = 0
        for x in sorted(del_indexes):
            self._delete_dy_index(x - z)
            z += 1

        return rez

    def _delete_dy_index(self, index):
        if 0 <= index < len(self._memory):
            del self._memory[index]
            return 1
        return 0

    def _delete_dy_indexes(self, *indexes):
        del_counter = 0
        for index in sorted(indexes):
            deleted = self._delete_dy_index(index - del_counter)
            del_counter += deleted
        return del_counter

    def _memory_dump(self):
        print('\n-- dump --')
        for d in self._memory:
            print(d.values())
        print('-- |--| --\n')

    def select(self, where=None, limit=None, desk=None):
        limit = int(limit.strip()) if limit else 0
        where = 'True' if not where else where
        where = self.fix_where(where)

        rez = []
        l = locals()
        mem = self._memory if not desk else reversed(self._memory)
        for d in mem:
            for name_, type_, len_ in self._schema:
                l[name_] = d[name_]

            st = parser.expr(where)
            is_ok = eval(st.compile())

            if limit and len(rez) >= limit:
                return rez

            if is_ok:
                rez.append(d)
        return rez

    def insert(self, insert_obj_row):
        if not insert_obj_row.startswith('(') or not insert_obj_row.endswith(')'):
            return
        insert_obj_row = insert_obj_row.replace("'", "'''")
        insert_obj_row = insert_obj_row.replace("\"", "'''")
        try:
            st = parser.expr(insert_obj_row)
            obj = eval(st.compile())
            if type(obj) != tuple or len(obj) != len(self._schema):
                return

            d = {}
            i = 0
            for name_, type_, len_ in self._schema:
                d[name_] = obj[i]
                i += 1

                _ck = CHECKERS[type_]
                if _ck(d[name_]):
                    return

            self._insert(d)
            return d
        except Exception as e:
            self.error('Insertion error! {0}'.format(e))
            return

    def fix_where(self, where):
        z = where.replace(' = ', ' == ')
        z = z.replace('__', '')
        return z.replace('import', '')

    def last(self):
        pass

    def init_mem(self, db_name, schema):
        i = 0
        all = []
        data = {}

        self.lock.acquire()

        def read_tail(f):
            while True:
                c = f.read(1)
                if not c or c == '\n':
                    return

        try:
            _r = open(db_name, 'rb')
        except:
            self.info("Create New DB")
            try:
                t = open(db_name, 'a')
                t.close()
            except Exception as e:
                self.error("Can not create new DB: {0}".format(e))
                raise Exception('Can not create new DB')

        try:
            _r = open(db_name, 'rb')
        except:
            self.error("I/0 Error #1! Can not open!")
            raise Exception("I/0 Error #1! Can not open!")

        while 1:
            if i == len(schema):
                all.append(data)
                read_tail(_r)
                i = 0
                data = {}

            name_, type_, len_ = schema[i]

            d = _r.read(len_).replace('\0', '')
            if not d: break

            zero = _r.read(1)
            if not zero or ord(zero) != 0:
                read_tail(_r)
                i = 0
                data = {}
                continue

            try:
                data[name_] = type_(d)
            except:
                read_tail(_r)
                i = 0
                data = {}
            else:
                i += 1

        _r.close()

        self.lock.release()

        return all
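
A hypothetical round trip through the Store above, assuming the DUMPERS and CHECKERS tables it references are defined alongside it (they are not shown in this listing). Field names and values are illustrative; the class keeps its files under a local db directory, so that directory must exist first.

import os

if not os.path.isdir('db'):
    os.mkdir('db')                    # the Store keeps its files under ./db

schema = (
    ('id', int, 8),                   # (name, type, length) per the docstring
    ('value', str, 32),
)

store = Store('users.db', schema, log_level=4)
store.execute("insert into users values (1, 'alice')")
print(store.execute("select where id = 1 limit 1"))
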
Esempio n. 35
0
class CandleConnector():
    def __init__(self):
        self.lock = FileLock("config.csv.lock")
        # make dict here that stores the amount for each coin
        self.config = "config.csv"
        self.candles = candles.BinaceConnector()

    def readConfig(self):
        self.lock.acquire()
        df = pd.read_csv(self.config,
                         encoding='utf8',
                         delimiter=',',
                         names=[
                             'coin', 'capital', 'starting', 'limit',
                             'currentPrice', 'autobought', 'takeprofit',
                             'updatetime', 'orderid'
                         ])
        self.lock.release()
        df.set_index('coin', inplace=True)
        return df

    #get the current config
    def getCoinConfigData(self, coin):
        df = self.readConfig()
        return df.loc[coin]

    def getBuyPower(self):
        return float(self.candles.getUSD())

    #save a new copy of the config
    def setCoinConfigData(self, df):
        self.lock.acquire()
        df.to_csv(self.config, mode='w', header=False, index=True)
        self.lock.release()

    def getAutoBoughtAmount(self, coin):
        return float(self.getCoinConfigData(coin)['autobought'])

    # helper for buying a number of coins at current price
    def orderNumber(self, coin, number):
        return (self.candles.buyMarket(coin, number))

    # gives you a quote for a coin
    def getQuote(self, coin):
        return float(self.candles.getCoinPrice(coin))

    # set an order for a given amount
    def orderAmount(self, coin, amount):
        return (self.candles.order_buy_crypto_by_price(coin, amount))

    # write out to a log file
    def logit(self, message, destination):
        with open(f"logdata/{destination}.txt", "a") as f:
            f.write(message)
            f.write("\n")

    def saveCoinBuyData(self, coin, price, amount, setcap=None):
        df = self.readConfig()
        if setcap is not None:
            df.at[coin, 'capital'] = setcap
        df.at[coin, 'starting'] = price
        df.at[coin, 'autobought'] = amount
        df.at[coin, 'limit'] = price * .98
        self.setCoinConfigData(df)

    # check to see how much can be purchased with the current capital
    # then purchase that amount of coins
    def buyNow(self, coin, strat=None):
        coinsCapital = self.getCoinConfigData(coin)['capital']

        avalFunds = self.getBuyPower()
        if coinsCapital > avalFunds:
            return 0

        price = self.getQuote(coin)
        #TODO add logic that allows for multiple strategies that will
        #allow for different allocations of the starting capital
        BOUGHT = float(coinsCapital / price)
        minOrder = None
        minNot = None
        print(BOUGHT)
        #grab the trading rules for the coin
        for filt in (self.candles.getCoinInfo(coin)['filters']):
            if filt['filterType'] == "LOT_SIZE":
                minOrder = float(filt['minQty'])
            if filt['filterType'] == 'MIN_NOTIONAL':
                minNot = float(filt['minNotional'])
        mod = BOUGHT % minOrder

        #make sure the amount we are buying is standardized for Binance
        if mod:
            BOUGHT = BOUGHT - mod

        #this needs to get the precision from the filter
        BOUGHT = round(BOUGHT, 8)
        print(BOUGHT)
        if (BOUGHT * price) > minNot:
            order = self.orderNumber(coin, BOUGHT)
            self.saveCoinBuyData(coin, price, BOUGHT)
            self.logit(f"BUYING {order}", coin)
        else:
            self.logit(
                f"Failed to buy {BOUGHT}, {coin} due to minNotional of {minNot}",
                coin)
            BOUGHT = None
        return BOUGHT

    #sell an amount at current price
    def sellNow(self, coin):
        #get the amount the bot bought
        amount = self.getAutoBoughtAmount(coin)
        if amount > 0:
            # self.candles.testOrder(coin, SIDE_SELL, amount)
            sellorder = self.candles.sellMarket(coin, amount)
            orderID = sellorder['clientOrderId']
            status = self.candles.checkStatus(coin, orderID)
            timeout = 5
            time.sleep(2)
            #check a couple of times to make sure we are selling
            while status != 'FILLED':
                if timeout > 5:
                    timeout = 0
                    self.candles.cancelOrder(coin, orderID)
                status = self.candles.checkStatus(coin, orderID)
                timeout += 1
                time.sleep(2)

            # save the data for analysis later and reset the bot coin's config
            self.logit(f"SELLING DUE TO STRAT {sellorder}", coin)
            sellprice = float(sellorder['fills'][0]['price']) * amount
            print(sellprice)
            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)
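
readConfig and setCoinConfigData above each take the lock separately, so another process could write the CSV between a read and the following write. A minimal sketch of the same round trip with the lock held across both steps; the coin symbol and values are illustrative, and the column list is the one assumed by the class.

import pandas as pd
from filelock import FileLock

columns = ['coin', 'capital', 'starting', 'limit', 'currentPrice',
           'autobought', 'takeprofit', 'updatetime', 'orderid']

with FileLock("config.csv.lock"):
    df = pd.read_csv("config.csv", encoding='utf8', delimiter=',', names=columns)
    df.set_index('coin', inplace=True)
    df.at['BTCUSDT', 'starting'] = 50000.0     # illustrative update
    df.to_csv("config.csv", mode='w', header=False, index=True)
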
Esempio n. 36
0
def main():
    ret = 0

    parser = argparse.ArgumentParser(description='project mirroring',
                                     parents=[get_baseparser(
                                         tool_version=__version__)
                                     ])

    parser.add_argument('project')
    parser.add_argument('-c', '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-B', '--backupcount', default=8,
                        help='how many log files to keep around in batch mode')
    parser.add_argument('-I', '--incoming', action='store_true',
                        help='Check for incoming changes, terminate the '
                             'processing if not found.')
    try:
        args = parser.parse_args()
    except ValueError as e:
        print_exc_exit(e)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            logger.error("Cannot read config file from {}".format(args.config))
            sys.exit(1)
    else:
        config = {}

    GLOBAL_TUNABLES = [HOOKDIR_PROPERTY, PROXY_PROPERTY, LOGDIR_PROPERTY,
                       COMMANDS_PROPERTY, PROJECTS_PROPERTY,
                       HOOK_TIMEOUT_PROPERTY, CMD_TIMEOUT_PROPERTY]
    diff = diff_list(config.keys(), GLOBAL_TUNABLES)
    if diff:
        logger.error("unknown global configuration option(s): '{}'"
                     .format(diff))
        sys.exit(1)

    # Make sure the log directory exists.
    logdir = config.get(LOGDIR_PROPERTY)
    if logdir:
        check_create_dir(logger, logdir)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        sys.exit(1)
    logger.debug("web application URI = {}".format(uri))

    source_root = get_config_value(logger, 'sourceRoot', uri)
    if not source_root:
        sys.exit(1)

    logger.debug("Source root = {}".format(source_root))

    project_config = None
    projects = config.get(PROJECTS_PROPERTY)
    if projects:
        if projects.get(args.project):
            project_config = projects.get(args.project)
        else:
            for proj in projects.keys():
                try:
                    pattern = re.compile(proj)
                except re.error:
                    logger.error("Not a valid regular expression: {}".
                                 format(proj))
                    continue

                if pattern.match(args.project):
                    logger.debug("Project '{}' matched pattern '{}'".
                                 format(args.project, proj))
                    project_config = projects.get(proj)
                    break

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    prehook = None
    posthook = None
    use_proxy = False
    ignored_repos = None
    if project_config:
        logger.debug("Project '{}' has specific (non-default) config".
                     format(args.project))

        # Quick sanity check.
        KNOWN_PROJECT_TUNABLES = [DISABLED_PROPERTY, CMD_TIMEOUT_PROPERTY,
                                  HOOK_TIMEOUT_PROPERTY, PROXY_PROPERTY,
                                  IGNORED_REPOS_PROPERTY, HOOKS_PROPERTY]
        diff = diff_list(project_config.keys(), KNOWN_PROJECT_TUNABLES)
        if diff:
            logger.error("unknown project configuration option(s) '{}' "
                         "for project {}".format(diff, args.project))
            sys.exit(1)

        project_command_timeout = get_int(logger, "command timeout for "
                                                  "project {}".
                                          format(args.project),
                                          project_config.
                                          get(CMD_TIMEOUT_PROPERTY))
        if project_command_timeout:
            command_timeout = project_command_timeout
            logger.debug("Project command timeout = {}".
                         format(command_timeout))

        project_hook_timeout = get_int(logger, "hook timeout for "
                                               "project {}".
                                       format(args.project),
                                       project_config.
                                       get(HOOK_TIMEOUT_PROPERTY))
        if project_hook_timeout:
            hook_timeout = project_hook_timeout
            logger.debug("Project hook timeout = {}".
                         format(hook_timeout))

        ignored_repos = project_config.get(IGNORED_REPOS_PROPERTY)
        if ignored_repos:
            if not isinstance(ignored_repos, list):
                logger.error("{} for project {} is not a list".
                             format(IGNORED_REPOS_PROPERTY, args.project))
                sys.exit(1)
            logger.debug("has ignored repositories: {}".
                         format(ignored_repos))

        hooks = project_config.get(HOOKS_PROPERTY)
        if hooks:
            if not hookdir:
                logger.error("Need to have '{}' in the configuration "
                             "to run hooks".format(HOOKDIR_PROPERTY))
                sys.exit(1)

            if not os.path.isdir(hookdir):
                logger.error("Not a directory: {}".format(hookdir))
                sys.exit(1)

            for hookname in hooks:
                if hookname == "pre":
                    prehook = hookpath = os.path.join(hookdir, hooks['pre'])
                    logger.debug("pre-hook = {}".format(prehook))
                elif hookname == "post":
                    posthook = hookpath = os.path.join(hookdir, hooks['post'])
                    logger.debug("post-hook = {}".format(posthook))
                else:
                    logger.error("Unknown hook name {} for project {}".
                                 format(hookname, args.project))
                    sys.exit(1)

                if not is_exe(hookpath):
                    logger.error("hook file {} does not exist or not "
                                 "executable".format(hookpath))
                    sys.exit(1)

        if project_config.get(PROXY_PROPERTY):
            if not config.get(PROXY_PROPERTY):
                logger.error("global proxy setting is needed in order to "
                             "have per-project proxy")
                sys.exit(1)

            logger.debug("will use proxy")
            use_proxy = True

    if not ignored_repos:
        ignored_repos = []

    # Log messages to dedicated log file if running in batch mode.
    if args.batch:
        if not logdir:
            logger.error("The logdir property is required in batch mode")
            sys.exit(1)

        logfile = os.path.join(logdir, args.project + ".log")
        logger.debug("Switching logging to the {} file".
                     format(logfile))

        logger = logger.getChild("rotating")
        logger.setLevel(args.loglevel)
        logger.propagate = False
        handler = RotatingFileHandler(logfile, maxBytes=0, mode='a',
                                      backupCount=args.backupcount)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s: "
                                      "%(message)s", '%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(formatter)
        handler.doRollover()
        logger.addHandler(handler)

    # We want this to be logged to the log file (if any).
    if project_config:
        if project_config.get(DISABLED_PROPERTY):
            logger.info("Project {} disabled, exiting".
                        format(args.project))
            sys.exit(CONTINUE_EXITVAL)

    lock = FileLock(os.path.join(tempfile.gettempdir(),
                                 args.project + "-mirror.lock"))
    try:
        with lock.acquire(timeout=0):
            proxy = config.get(PROXY_PROPERTY) if use_proxy else None

            #
            # Cache the repositories first. This way it will be known that
            # something is not right, avoiding any needless pre-hook run.
            #
            repos = []
            try:
                repos = get_repos_for_project(logger, args.project,
                                              ignored_repos,
                                              commands=config.
                                              get(COMMANDS_PROPERTY),
                                              proxy=proxy,
                                              command_timeout=command_timeout,
                                              source_root=source_root,
                                              uri=uri)
            except RepositoryException as ex:
                logger.error('failed to get repositories for project {}: {}'.
                             format(args.project, ex))
                sys.exit(1)

            if not repos:
                logger.info("No repositories for project {}".
                            format(args.project))
                sys.exit(CONTINUE_EXITVAL)

            # Check if any of the repositories contains incoming changes.
            if args.incoming:
                got_incoming = False
                for repo in repos:
                    try:
                        if repo.incoming():
                            logger.debug('Repository {} has incoming changes'.
                                         format(repo))
                            got_incoming = True
                            break
                    except RepositoryException:
                        logger.error('Cannot determine incoming changes for '
                                     'repository {}'.format(repo))
                        sys.exit(1)

                if not got_incoming:
                    logger.info('No incoming changes for repositories in '
                                'project {}'.
                                format(args.project))
                    sys.exit(CONTINUE_EXITVAL)

            if prehook:
                logger.info("Running pre hook")
                if run_hook(logger, prehook,
                            os.path.join(source_root, args.project), proxy,
                            hook_timeout) != 0:
                    logger.error("pre hook failed for project {}".
                                 format(args.project))
                    logging.shutdown()
                    sys.exit(1)

            #
            # If one of the repositories fails to sync, the whole project sync
            # is treated as failed, i.e. the program will return 1.
            #
            for repo in repos:
                logger.info("Synchronizing repository {}".
                            format(repo.path))
                if repo.sync() != 0:
                    logger.error("failed to sync repository {}".
                                 format(repo.path))
                    ret = 1

            if posthook:
                logger.info("Running post hook")
                if run_hook(logger, posthook,
                            os.path.join(source_root, args.project), proxy,
                            hook_timeout) != 0:
                    logger.error("post hook failed for project {}".
                                 format(args.project))
                    logging.shutdown()
                    sys.exit(1)
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)

    logging.shutdown()
    sys.exit(ret)
Esempio n. 37
0
             f = f[:ind] + '.png'
             problems.append((f, reason))
 if not checking:
     numImages = min(numImages, NUM_PER_GROUP)
 if autochecking:
     files = problems
 if numDone >= numImages:
     groupsDone[groupIndex] = True
     if startHere is None and not checking and not doTrans:
         #import pdb; pdb.set_trace()
         continue
 #print 'group '+groupName+', template image: '+imageTemplate
 #templateFile=os.path.join(directory,groupName,template)
 lock = FileLock(template, timeout=None)
 try:
     lock.acquire()
     textsT = fieldsT = pairsT = samePairsT = horzLinksT = groupsT = transcriptionsT = cornersT = cornersActualT = None
     if template is not None:
         with open(template) as f:
             read = json.load(f)
             textsT = read['textBBs']
             fieldsT = read['fieldBBs']
             pairsT = read['pairs']
             samePairsT = read['samePairs']
             groupsT = read['groups']
             if 'transcriptions' in read:
                 transcriptionsT = read['transcriptions']
             cornersT = read['page_corners']
             if 'horzLinks' in read and not skipHLinks:
                 horzLinksT = read['horzLinks']
             #cornersActualT=read['actualPage_corners']
Esempio n. 38
0
def main(argv):
    current_ver = ''
    start_ver = ''
    version_list = []
    rev_version_list = []  # Version list in reverse order
    fltg_root_dir = None

    try:
        opts, args = getopt.getopt(argv, "hv:s:d:", ["help"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif opt == '-v':
            current_ver = arg
        elif opt == '-s':
            start_ver = arg
        elif opt == '-d':
            fltg_root_dir = arg
        else:
            usage()
            sys.exit(0)

    if start_ver == '':
        start_ver = current_ver

    try:
        sdk = os.environ['SDK']
    except KeyError:
        print("SDK Environment variable must be defined")
        sys.exit(0)

    try:
        issu_db_dir = os.environ['ISSU_DB'] + '/'
    except KeyError:
        issu_db_dir = sdk + ISSU_DB_DEF_DIR

    base_dir = issu_db_dir
    ver_list_c = issu_version_list.issu_version_list(base_dir,
                                                     start_ver,
                                                     current_ver)
    rev_version_list = ver_list_c.get_ver_list()
    version_list = ver_list_c.get_ver_list(reverse = False)

    # Build the base dir
    if current_ver == '':
        base_dir += 'current'
    else:
        base_dir += 'ver_' + current_ver
    target_dir = base_dir

    # Lock the DB to allow multiple simultaneous builds. If the lock is taken,
    # another build is currently generating the DB; let it finish before we
    # move on to the next phase of compiling the DLL.
    lock_file = issu_db_dir + 'issu_db.lock'
    try:
        lock = FileLock(lock_file, 1800)
        lock.acquire()  # wait up to 30 min
    except Timeout:
        print('Failed to acquire ISSU DB lock')
        sys.exit(-1)

    db_class = issu_db_gen.global_struct_db(base_dir, None)
    if not db_class.generate_c_db(current_ver, version_list):
        print('Failed to generate DB for version %s' % current_ver)

    base_dir = issu_db_dir + 'ver_'
    for ver in rev_version_list:
        if ver != current_ver:
            db_class.parse_previous_versions(base_dir, ver)

    # Final generation and shutdown the DB gen class
    db_class.shutdown()

    # Save reg file signatures into the current version DB
    if current_ver != '':
        if fltg_root_dir is None:
            fltg_root_dir = sdk + FLTG_INPUT_DIR
        reg_file_c = issu_reg_files.reg_file_sig(issu_db_dir)
        reg_file_c.save_sig(current_ver, fltg_root_dir)

    # Generate the patch list
    patch_class = issu_patch_insert.issu_patch(issu_db_dir)
    patch_class.read_patch_db(version_list)
    patch_class.create_patch_c_file()
    patch_class.create_patch_makefile()
    lock.release()
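
A minimal sketch of the bounded wait used above: block for up to 30 minutes for another build to finish, then give up. The try/finally is an addition so the lock is released even if the generation step fails.

import sys

from filelock import FileLock, Timeout

lock = FileLock('issu_db.lock', timeout=1800)   # wait up to 30 min
try:
    lock.acquire()
except Timeout:
    print('Failed to acquire ISSU DB lock')
    sys.exit(-1)

try:
    pass  # regenerate the DB here
finally:
    lock.release()
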
Esempio n. 39
0
class Client(JSONFile, MioLogger):
    base_dir:     Runtime[Union[Path, str]]
    server:       Union[URL, str] = ""
    device_id:    str             = ""
    user_id:      UserId          = ""  # type: ignore
    access_token: str             = ""

    net:      Runtime[Network]     = field(init=False, repr=False)
    auth:     Runtime[Auth]        = field(init=False, repr=False)
    profile:  Runtime[Profile]     = field(init=False, repr=False)
    rooms:    Runtime[Rooms]       = field(init=False, repr=False)
    sync:     Runtime[Sync]        = field(init=False, repr=False)
    e2e:      Runtime[E2E]         = field(init=False, repr=False)
    devices:  Runtime[Devices]     = field(init=False, repr=False)
    media:    Runtime[MediaStore]  = field(init=False, repr=False)
    _filters: Runtime[FilterStore] = field(init=False, repr=False)

    _lock:       Runtime[Optional[FileLock]] = field(init=False, repr=False)
    _terminated: Runtime[bool]               = field(init=False, repr=False)


    def __post_init__(self) -> None:
        MioLogger.__post_init__(self)

        self.net          = Network(self)
        self.auth         = Auth(self)
        self.profile      = Profile(self)
        self.account_data = AccountData(self)
        self.rooms        = Rooms(self)
        self.sync         = Sync(self)
        self.e2e          = E2E(self)
        self.devices      = Devices(self)
        self.media        = MediaStore(self)
        self._filters     = FilterStore(self)

        self._lock       = None
        self._terminated = False

        JSONFile.__post_init__(self)


    async def __aenter__(self) -> "Client":
        return self


    async def __aexit__(self, exc_type, exc, tb) -> None:
        await self.terminate()


    def __del__(self) -> None:
        if self._lock:
            self._lock.release()


    @property
    def path(self) -> AsyncPath:
        return AsyncPath(self.base_dir) / "client.json"


    @property
    async def was_saved(self) -> bool:
        data = "{}"
        with suppress(FileNotFoundError):
            data = await self.path.read_text()
        return bool(json.loads(data).get("access_token"))


    async def load(self, **base_dir_placeholders: str) -> "Client":
        self.base_dir = str(self.base_dir).format(**base_dir_placeholders)
        await super().load()

        self._acquire_lock()

        for attr in self.__dict__.values():
            if isinstance(attr, ClientModule):
                await attr.load()

        return self


    async def terminate(self) -> None:
        self.__del__()  # release lock
        await self.net.disconnect()
        self._terminated = True


    def _acquire_lock(self) -> None:
        self._lock = FileLock(str(Path(self.base_dir) / ".lock"))
        self._lock.acquire(timeout=1)
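
A minimal sketch of the directory-locking idea behind _acquire_lock above: a single .lock file inside the data directory keeps a second client from opening the same base_dir. The path and timeout are illustrative.

from pathlib import Path

from filelock import FileLock, Timeout

base_dir = Path("/tmp/mio-client")            # illustrative data directory
base_dir.mkdir(parents=True, exist_ok=True)

lock = FileLock(str(base_dir / ".lock"))
try:
    lock.acquire(timeout=1)
except Timeout:
    raise RuntimeError("another client is already using %s" % base_dir)

try:
    pass  # use the directory here
finally:
    lock.release()
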
Esempio n. 40
0
class MultiprocessRotatingFileHandler(TimedRotatingFileHandler):
    """
    Ref: https://github.com/di/mrfh/blob/master/mrfh/__init__.py
    * Change the doRollover logic so that already-rotated log files are not
      deleted when rotating
    * Close the file stream when the log lock is released, fixing rename
      operations being denied on the log file
    * Reopening the file stream frequently causes a serious performance hit
    * Uses FileLock instead of threading.Lock, at some performance cost in the
      single-process case
    """
    def __init__(self, lock_file, *args, **kwargs):
        self.file_lock = FileLock(lock_file)
        super(MultiprocessRotatingFileHandler, self).__init__(*args, **kwargs)

    def _open_file(self):
        self.stream = self._open()

    def acquire(self):
        self.file_lock.acquire()
        if self.stream and self.stream.closed:
            self._open_file()

    def release(self):
        if self.stream and not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        self.file_lock.release()

    def close(self):
        if self.stream and not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        if self.file_lock.is_locked:
            self.file_lock.release()

    def doRollover(self):
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        current_time = int(time.time())
        dst_now = time.localtime(current_time)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            time_tuple = time.gmtime(t)
        else:
            time_tuple = time.localtime(t)
            dst_then = time_tuple[-1]
            if dst_now != dst_then:
                if dst_now:
                    addend = 3600
                else:
                    addend = -3600
                time_tuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, time_tuple))

        # # Changed part
        # if os.path.exists(dfn):
        #     os.remove(dfn)
        if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
            self.rotate(self.baseFilename, dfn)
        # # Changed part end

        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        new_rollover_at = self.computeRollover(current_time)
        while new_rollover_at <= current_time:
            new_rollover_at = new_rollover_at + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT'
                or self.when.startswith('W')) and not self.utc:
            dst_at_rollover = time.localtime(new_rollover_at)[-1]
            if dst_now != dst_at_rollover:
                if not dst_now:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:  # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                new_rollover_at += addend
        self.rolloverAt = new_rollover_at
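
A hypothetical way to wire up the handler above: every process builds it with the same log file and the same lock file, so rotation is coordinated through the FileLock. File names and rotation settings are illustrative.

import logging
import os

handler = MultiprocessRotatingFileHandler(
    "app.log.lock",                 # lock_file, consumed by the subclass
    "app.log",                      # remaining args go to TimedRotatingFileHandler
    when="midnight",
    backupCount=7,
)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("hello from pid %s", os.getpid())
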
Esempio n. 41
0
def main():
    parser = argparse.ArgumentParser(description='project management.',
                                     formatter_class=argparse.
                                     ArgumentDefaultsHelpFormatter,
                                     parents=[get_baseparser(
                                         tool_version=__version__)
                                     ])

    parser.add_argument('-b', '--base', default="/var/opengrok",
                        help='OpenGrok instance base directory')
    parser.add_argument('-R', '--roconfig',
                        help='OpenGrok read-only configuration file')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='URI of the webapp with context path')
    parser.add_argument('-c', '--configmerge',
                        help='path to the ConfigMerge binary')
    parser.add_argument('--java', help='Path to java binary '
                                       '(needed for config merge program)')
    parser.add_argument('-j', '--jar', help='Path to jar archive to run')
    parser.add_argument('-u', '--upload', action='store_true',
                        help='Upload configuration at the end')
    parser.add_argument('-n', '--noop', action='store_true', default=False,
                        help='Do not run any commands or modify any config'
                             ', just report. Usually implies '
                             'the --debug option.')
    parser.add_argument('-N', '--nosourcedelete', action='store_true',
                        default=False, help='Do not delete source code when '
                                            'deleting a project')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-a', '--add', metavar='project', nargs='+',
                       help='Add project (assumes its source is available '
                            'under source root')
    group.add_argument('-d', '--delete', metavar='project', nargs='+',
                       help='Delete project and its data and source code')
    group.add_argument('-r', '--refresh', action='store_true',
                       help='Refresh configuration. If read-only '
                            'configuration is supplied, it is merged '
                            'with current '
                            'configuration.')

    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    doit = not args.noop
    configmerge = None

    #
    # Setup logger as a first thing after parsing arguments so that it can be
    # used through the rest of the program.
    #
    logger = get_console_logger(get_class_basename(), args.loglevel)

    if args.nosourcedelete and not args.delete:
        logger.error("The no source delete option is only valid for delete")
        sys.exit(1)

    # Set the base directory
    if args.base:
        if path.isdir(args.base):
            logger.debug("Using {} as instance base".
                         format(args.base))
        else:
            logger.error("Not a directory: {}\n"
                         "Set the base directory with the --base option."
                         .format(args.base))
            sys.exit(1)

    # If read-only configuration file is specified, this means read-only
    # configuration will need to be merged with active webapp configuration.
    # This requires config merge tool to be run so couple of other things
    # need to be checked.
    if args.roconfig:
        if path.isfile(args.roconfig):
            logger.debug("Using {} as read-only config".format(args.roconfig))
        else:
            logger.error("File {} does not exist".format(args.roconfig))
            sys.exit(1)

        configmerge_file = get_command(logger, args.configmerge,
                                       "opengrok-config-merge")
        if configmerge_file is None:
            logger.error("Use the --configmerge option to specify the path to "
                         "the config merge script")
            sys.exit(1)

        configmerge = [configmerge_file]
        if args.loglevel:
            configmerge.append('-l')
            configmerge.append(str(args.loglevel))

        if args.jar is None:
            logger.error('jar file needed for config merge tool, '
                         'use --jar to specify one')
            sys.exit(1)

    uri = args.uri
    if not is_web_uri(uri):
        logger.error("Not a URI: {}".format(uri))
        sys.exit(1)
    logger.debug("web application URI = {}".format(uri))

    lock = FileLock(os.path.join(tempfile.gettempdir(),
                                 os.path.basename(sys.argv[0]) + ".lock"))
    try:
        with lock.acquire(timeout=0):
            if args.add:
                for proj in args.add:
                    project_add(doit=doit, logger=logger,
                                project=proj,
                                uri=uri)

                config_refresh(doit=doit, logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java)
            elif args.delete:
                for proj in args.delete:
                    project_delete(logger=logger,
                                   project=proj,
                                   uri=uri, doit=doit,
                                   deletesource=not args.nosourcedelete)

                config_refresh(doit=doit, logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java)
            elif args.refresh:
                config_refresh(doit=doit, logger=logger,
                               basedir=args.base,
                               uri=uri,
                               configmerge=configmerge,
                               jar_file=args.jar,
                               roconfig=args.roconfig,
                               java=args.java)
            else:
                parser.print_help()
                sys.exit(1)

            if args.upload:
                main_config = get_config_file(basedir=args.base)
                if path.isfile(main_config):
                    if doit:
                        with io.open(main_config, mode='r',
                                     encoding="utf-8") as config_file:
                            config_data = config_file.read().encode("utf-8")
                            if not set_configuration(logger,
                                                     config_data, uri):
                                sys.exit(1)
                else:
                    logger.error("file {} does not exist".format(main_config))
                    sys.exit(1)
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)
Esempio n. 42
0
class CustomLogGui(wx.LogGui):
    """Logger."""

    def __init__(self, log_file, debug=False):
        """Initialize."""

        self.format = "%(message)s"
        self.file_name = log_file
        self.file_lock = FileLock(self.file_name + '.lock')
        self.debug = debug

        try:
            with self.file_lock.acquire(1):
                with codecs.open(self.file_name, "w", "utf-8") as f:
                    f.write("")
        except Exception:
            self.file_name = None

        wx.LogGui.__init__(self)

    def DoLogText(self, msg):
        """Log the text."""

        try:
            if self.file_name is not None:
                with self.file_lock.acquire(1):
                    with codecs.open(self.file_name, 'a', encoding='utf-8') as f:
                        f.write(msg)
            else:
                msg = "[ERROR] Could not acquire lock for log!\n" + msg
        except Exception:
            self.file_name = None

        if self.debug:
            sys.stdout.write(
                (self.format % {"message": msg})
            )

        wx.LogGui.DoLogText(self, msg)

    def DoLogTextAtLevel(self, level, msg):
        """Perform log at level."""

        current = self.GetLogLevel()

        if level <= current and level == wx.LOG_Info:
            self._debug(msg)
        elif level <= current and level == wx.LOG_FatalError:
            self._critical(msg)
        elif level <= current and level == wx.LOG_Warning:
            self._warning(msg)
        elif level <= current and level == wx.LOG_Error:
            self._error(msg)

    def formatter(self, lvl, log_fmt, msg, msg_fmt=None):
        """Special formatters for log message."""

        return log_fmt % {
            "loglevel": lvl,
            "message": util.to_ustr(msg if msg_fmt is None else msg_fmt(msg))
        }

    def _log(self, msg):
        """Base logger."""

        return self.format % {"message": msg}

    def _debug(self, msg, log_fmt="%(loglevel)s: %(message)s\n"):
        """Debug level log."""

        self.DoLogText(self._log(self.formatter("DEBUG", log_fmt, msg)))

    def _critical(self, msg, log_fmt="%(loglevel)s: %(message)s\n"):
        """Critical level log."""

        self.DoLogText(self._log(self.formatter("CRITICAL", log_fmt, msg)))

    def _warning(self, msg, log_fmt="%(loglevel)s: %(message)s\n"):
        """Warning level log."""

        self.DoLogText(self._log(self.formatter("WARNING", log_fmt, msg)))

    def _error(self, msg, log_fmt="%(loglevel)s: %(message)s\n"):
        """Error level log."""

        self.DoLogText(self._log(self.formatter("ERROR", log_fmt, msg)))
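
A minimal sketch of the timed acquire used above: acquire(timeout=...) returns an object that works as a context manager, so the lock is released even if the write raises. File names are illustrative, and on timeout the message is simply dropped rather than blocking the UI.

import codecs

from filelock import FileLock, Timeout

log_lock = FileLock("gui.log.lock")
try:
    with log_lock.acquire(timeout=1):
        with codecs.open("gui.log", "a", encoding="utf-8") as f:
            f.write("message\n")
except Timeout:
    pass  # drop the message rather than block the UI
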