Exemplo n.º 1
0
	def test_getPrioStatus(self):
		"""A priority status can be popped exactly once; popping removes it."""
		store = DataStore()
		store.addStatus(Type.priority, u'prioAbc')

		# Popping yields the stored priority status text.
		assert_equal(u'prioAbc', store.popStatus(Type.priority)[1])
		# A second pop finds nothing: the first pop deleted the entry.
		assert_false(store.popStatus(Type.priority))
Exemplo n.º 2
0
	def test_AddStatus(self):
		"""A status added with Type.normal appears in getStatuses output."""
		store = DataStore()
		store.addStatus(Type.normal, u'abc')

		# The registered status must be present in the listing.
		found = any(row[1] == u'abc' for row in store.getStatuses(Type.normal))
		assert_true(found)
Exemplo n.º 3
0
	def test_setPrioStatus(self):
		"""A status added with Type.priority appears in getStatuses output."""
		store = DataStore()
		store.addStatus(Type.priority, u'prioAbc')

		# The registered priority status must be present in the listing.
		found = any(row[1] == u'prioAbc'
		            for row in store.getStatuses(Type.priority))
		assert_true(found)
def main(argv):
    """Parse options and apply configuration changes to an Abe database.

    Returns 0 on success (including when only help text was printed).
    """
    # Merge: the two local placeholders, then DataStore's defaults, which
    # take precedence on any overlapping keys (same order as conf.update).
    conf = dict({"debug": None, "logging": None}, **DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print(
            """Usage: python -m Abe.reconfigure [-h] [--config=FILE] [--CONFIGVAR=VALUE]...

Apply configuration changes to an existing Abe database, if possible.

  --help                    Show this help message and exit.
  --config FILE             Read options from FILE.
  --use-firstbits {true|false}
                            Turn Firstbits support on or off.
  --keep-scriptsig false    Remove input validation scripts from the database.

All configuration variables may be given as command arguments.""")
        return 0

    # Bare messages to stdout; an optional dictConfig can reconfigure this.
    logging.basicConfig(format="%(message)s",
                        stream=sys.stdout,
                        level=logging.DEBUG)
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)
    firstbits.reconfigure(store, args)
    keep_scriptsig_reconfigure(store, args)
    return 0
Exemplo n.º 5
0
def main(argv):
    """CLI entry point: run maintenance commands against an Abe database.

    Returns 0 on success; raises ValueError on an unknown command.
    """
    conf = {
        "debug":                    None,
        "logging":                  None,
        }
    # DataStore defaults take precedence over the placeholders above.
    conf.update(DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf,
                                     strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print ("""Usage: python -m Abe.admin [-h] [--config=FILE] COMMAND...

Options:

  --help                    Show this help message and exit.
  --config FILE             Abe configuration file.

Commands:

  delete-chain-blocks NAME  Delete all blocks in the specified chain
                            from the database.

  delete-chain-transactions NAME  Delete all blocks and transactions in
                            the specified chain.

  delete-tx TX_ID           Delete the specified transaction.
  delete-tx TX_HASH

  link-txin                 Link transaction inputs to previous outputs.

  rewind-datadir DIRNAME    Reset the pointer to force a rescan of
                            blockfiles in DIRNAME.""")
        return 0

    # Plain messages to stdout; optional dictConfig may override.
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(message)s")
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)

    # Each command consumes itself plus (except link-txin) one argument.
    while len(argv) != 0:
        command = argv.pop(0)
        if command == 'delete-chain-blocks':
            delete_chain_blocks(store, argv.pop(0))
        elif command == 'delete-chain-transactions':
            delete_chain_transactions(store, argv.pop(0))
        elif command == 'delete-tx':
            delete_tx(store, argv.pop(0))
        elif command == 'rewind-datadir':
            rewind_datadir(store, argv.pop(0))
        elif command == 'link-txin':
            link_txin(store)
        else:
            raise ValueError("Unknown command: " + command)

    return 0
Exemplo n.º 6
0
def main(argv):
    """Parse options and apply configuration changes to an Abe database.

    Returns 0 on success (including when only help text was printed).
    """
    conf = {"debug": None, "logging": None}
    # DataStore defaults take precedence over the placeholders above.
    conf.update(DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ("-h", "--help"):
        print(
            """Usage: python -m Abe.reconfigure [-h] [--config=FILE] [--CONFIGVAR=VALUE]...

Apply configuration changes to an existing Abe database, if possible.

  --help                    Show this help message and exit.
  --config FILE             Read options from FILE.
  --use-firstbits {true|false}
                            Turn Firstbits support on or off.
  --keep-scriptsig false    Remove input validation scripts from the database.

All configuration variables may be given as command arguments."""
        )
        return 0

    # Plain messages to stdout; optional dictConfig may override.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
    if args.logging is not None:
        import logging.config as logging_config

        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)
    firstbits.reconfigure(store, args)
    keep_scriptsig_reconfigure(store, args)
    return 0
Exemplo n.º 7
0
def build_measure_GISFM(ensemble_matrix, target, score):
    """
    Function to create the global ISFM measure
    :param ensemble_matrix: A numpy array of num_classifiers by num_classes by num_instances
    :param target: An array with the real class
    :param score: The score to use for classifier performance calculation
    :return: A DataStore.DictDataStore mapping classifier subsets to measure values
    """
    num_classifiers, num_classes, num_instances = ensemble_matrix.shape
    similarities = compute_similarities(ensemble_matrix)
    # To store measure
    measure = DataStore.DictDataStore(num_classifiers)
    confidences = np.empty((num_classifiers, ))
    # Get the callable score function
    if score == "acc":
        performance_function = accuracy
    elif score == "tpr":
        performance_function = tpr_mean
    elif score == "gm":
        performance_function = gm
    elif score == "f1":
        performance_function = get_f_measure_function(target)
    elif score == "auc":
        performance_function = get_auc_score_function(target)
    elif score == "ap":
        performance_function = get_ap_score_function(target)
    else:
        raise Exception(
            "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
    # For each individual classifier get its performance
    for i in range(num_classifiers):
        # Get accuracy of classifiers i
        prob = ensemble_matrix[i, :, :]
        if score == "auc" or score == "ap":
            # Rank-based scores consume the probabilities directly.
            val = performance_function(target, prob.T)
        else:
            # Hard-label scores need an argmax prediction per instance.
            pred = np.argmax(prob, axis=0)
            val = performance_function(target, pred)
        confidences[i] = val
        measure.put((i, ), 0.0)
    # Get the order of confidences (ascending, so order[i+1:] are the
    # classifiers with higher confidence than order[i]).
    order = np.argsort(confidences)
    # Calculate values: discount each classifier's confidence by its maximum
    # similarity to any higher-confidence classifier.  The best classifier
    # (last in order) has an empty tail, so its discount is zero.
    for i in range(len(order)):
        s = similarities[order[i], order[i + 1:]]
        if len(s) == 0:
            s = 0.0
        else:
            s = s.max()
        measure.put((order[i], ), confidences[order[i]] * (1 - s))

    # Multi-classifier subsets get the sum of their singleton values.
    for i in all_combs(range(num_classifiers)):
        if len(i) > 1:
            v = 0.0
            for j in i:
                v += measure.get((j, ))
            measure.put(i, v)

    measure.normalize()
    return measure
Exemplo n.º 8
0
def CheckFixedBlock(ws, params, logger):
    """Read the weather station's fixed block, sanity-check it, and cache it.

    Warns on clock disagreement and on pressure-offset changes, stores the
    station type, pressure offset and raw fixed block in *params*, and
    returns the fixed-block dict, or None if it could not be read.
    """
    fixed_block = ws.get_fixed_block(unbuffered=True)
    if not fixed_block:
        return None
    # check clocks
    try:
        s_time = DataStore.safestrptime(
            fixed_block['date_time'], '%Y-%m-%d %H:%M')
    except Exception:
        # Unparseable station timestamp: silently skip the clock check.
        s_time = None
    if s_time:
        c_time = datetime.now().replace(second=0, microsecond=0)
        diff = abs(s_time - c_time)
        if diff > timedelta(minutes=2):
            logger.warning(
                "Computer and weather station clocks disagree by %s (H:M:S).", str(diff))
    # store weather station type
    params.set('fixed', 'ws type', ws.ws_type)
    # store info from fixed block
    pressure_offset = fixed_block['rel_pressure'] - fixed_block['abs_pressure']
    # NOTE(review): eval() on a stored parameter string — safe only if the
    # params file is trusted; ast.literal_eval would be safer. Confirm.
    old_offset = eval(params.get('fixed', 'pressure offset', 'None'))
    if old_offset and abs(old_offset - pressure_offset) > 0.01:
        # re-read fixed block, as can get incorrect values
        logger.warning('Re-read fixed block')
        fixed_block = ws.get_fixed_block(unbuffered=True)
        if not fixed_block:
            return None
        pressure_offset = fixed_block['rel_pressure'] - fixed_block['abs_pressure']
    # Warn only if the offset still differs after the re-read above.
    if old_offset and abs(old_offset - pressure_offset) > 0.01:
        logger.warning(
            'Pressure offset change: %g -> %g', old_offset, pressure_offset)
    params.set('fixed', 'pressure offset', '%g' % (pressure_offset))
    params.set('fixed', 'fixed block', str(fixed_block))
    params.flush()
    return fixed_block
Exemplo n.º 9
0
	def test_TweetCountup(self):
		"""tweetCountup increments the per-status tweet counter each call."""
		store = DataStore()

		store.addStatus(Type.normal, u'status1')

		# Counter must read 1 after the first countup, 2 after the second.
		for expected in (1, 2):
			store.tweetCountup(Type.normal, u'status1')
			row = store.getStatuses(Type.normal)[0]
			assert_true(row[1] == u'status1' and row[3] == expected)
Exemplo n.º 10
0
def main(argv):
    """CLI entry point: run maintenance commands against an Abe database.

    Returns 0 on success; raises ValueError on an unknown command.
    """
    # Merge: the two local placeholders, then DataStore's defaults, which
    # take precedence on any overlapping keys (same order as conf.update).
    conf = dict({"debug": None, "logging": None}, **DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print("""Usage: python -m Abe.admin [-h] [--config=FILE] COMMAND...

Options:

  --help                    Show this help message and exit.
  --config FILE             Abe configuration file.

Commands:

  delete-chain-blocks NAME  Delete all blocks in the specified chain
                            from the database.

  delete-chain-transactions NAME  Delete all blocks and transactions in
                            the specified chain.

  delete-tx TX_ID           Delete the specified transaction.
  delete-tx TX_HASH

  link-txin                 Link transaction inputs to previous outputs.

  rewind-datadir DIRNAME    Reset the pointer to force a rescan of
                            blockfiles in DIRNAME.""")
        return 0

    logging.basicConfig(format="%(message)s",
                        stream=sys.stdout,
                        level=logging.DEBUG)
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)

    # Commands that consume exactly one following argument.
    one_arg_commands = {
        'delete-chain-blocks': delete_chain_blocks,
        'delete-chain-transactions': delete_chain_transactions,
        'delete-tx': delete_tx,
        'rewind-datadir': rewind_datadir,
    }
    while argv:
        command = argv.pop(0)
        if command in one_arg_commands:
            one_arg_commands[command](store, argv.pop(0))
        elif command == 'link-txin':
            link_txin(store)
        else:
            raise ValueError("Unknown command: " + command)

    return 0
Exemplo n.º 11
0
def get_info(content):
    """Return account info for a logged-in request, or an error payload.

    Requires both 'mod_hash' and 'username' in *content*.
    """
    # Guard clause: reject unauthenticated requests up front.
    if 'mod_hash' not in content or 'username' not in content:
        return {'type': 'error',
                'error': 'must login first'}
    return DB.get_info(username=content['username'],
                       mod_hash=content['mod_hash'])
Exemplo n.º 12
0
def build_measure_m_aggregation(ensemble_matrix,
                                target,
                                m_function,
                                score="acc"):
    """
    Returns the measure for the OIFM method
    :param ensemble_matrix: A numpy array of num_classifiers by num_classes by num_instances
    :param target: An array with the real class
    :param m_function: The function to use for measure calculation
    :param score: The score to use for classifier performance calculation
    :return: Measure
    """
    # ensemble_matrix => num_classifiers, num_instances
    num_classifiers, num_classes, num_instances = ensemble_matrix.shape
    performances = np.empty((num_classifiers, ))
    if score == "acc":
        performance_function = accuracy
    elif score == "tpr":
        performance_function = tpr_mean
    elif score == "gm":
        performance_function = gm
    elif score == "f1":
        performance_function = get_f_measure_function(target)
    elif score == "auc":
        performance_function = get_auc_score_function(target)
    elif score == "ap":
        performance_function = get_ap_score_function(target)
    else:
        raise Exception(
            "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
    # For each individual classifier get its performance
    for i in range(num_classifiers):
        # Get accuracy of classifiers i
        prob = ensemble_matrix[i, :, :]
        if score == "auc" or score == "ap":
            # Rank-based scores consume the probabilities directly.
            val = performance_function(target, prob.T)
        else:
            # Hard-label scores need an argmax prediction per instance.
            pred = np.argmax(prob, axis=0)
            val = performance_function(target, pred)
        performances[i] = val

    measure = DataStore.DictDataStore(num_classifiers)
    # Calculate denominator: m_function over ALL squared performances, so the
    # full-classifier subset gets measure value 1.
    performances_2 = np.power(performances, 2)
    denominator = m_function(performances_2)
    # For each combination get the measure value: m_function applied to the
    # squared performances of the subset's members (zeros elsewhere).
    for i in all_combs(range(num_classifiers)):
        v = np.zeros((num_classifiers, ))
        for j in i:
            v[j] = performances_2[j]
        nominator = m_function(v)
        measure.put(i, nominator / denominator)

    return measure
Exemplo n.º 13
0
def build_measure_additive(ensemble_matrix, target, score="acc"):
    """
    Function that builds the additive measure
    :param ensemble_matrix: A numpy array of num_classifiers by num_classes by num_instances
    :param target: An array with the real class
    :param score: The score to use for classifier performance calculation
    :return: The additive measure
    """
    # ensemble_matrix => num_classifiers, num_instances
    num_classifiers, num_classes, num_instances = ensemble_matrix.shape
    performances = np.empty(num_classifiers)
    if score == "acc":
        # performance_function = metrics.accuracy_score
        performance_function = accuracy
    elif score == "tpr":
        performance_function = tpr_mean
    elif score == "gm":
        performance_function = gm
    elif score == "f1":
        performance_function = get_f_measure_function(target)
    elif score == "auc":
        performance_function = get_auc_score_function(target)
    elif score == "ap":
        performance_function = get_ap_score_function(target)
    else:
        raise Exception(
            "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
    # for each possible classifier combination
    for i in range(num_classifiers):
        # Get accuracy of classifiers i
        prob = ensemble_matrix[i, :, :]
        if score == "auc" or score == "ap":
            # Rank-based scores consume the probabilities directly.
            val = performance_function(target, prob.T)
        else:
            # prob => num_classes, num_instances
            pred = np.argmax(prob, axis=0)
            val = performance_function(target, pred)
        performances[i] = val

    # Each classifier gets a base share of 1/k, nudged up or down by at most
    # 1/(2k) depending on whether it beats the mean performance; tanh(100*y)
    # saturates quickly, so the nudge is near-binary.
    level_mean = performances.mean()
    y = performances - level_mean
    values = (1.0 /
              num_classifiers) + np.tanh(y * 100) / (2.0 * num_classifiers)

    measure = DataStore.DictDataStore(num_classifiers)
    # For each accuracy set measure value as variation of mean based on difference with the level mean
    for i in all_combs(range(num_classifiers)):
        value = 0.0
        for j in i:
            value += values[j]
        measure.put(i, value)

    measure.normalize()
    return measure
Exemplo n.º 14
0
def randomize(graph):
    """Replace every node of *graph* with a new random-location node.

    Each new node is a 1-tuple holding a random float in [0, 1), carries a
    sequential ``id`` attribute and a fresh ``DataStore(100000)``, and
    inherits the old node's edges before the old node is removed.
    """
    i = 0
    # BUG FIX: iterate over a snapshot of the nodes.  The original iterated
    # the live graph.nodes() view while calling remove_node/add_node inside
    # the loop, which mutates the underlying dict during iteration.
    for n in list(graph.nodes()):
        i += 1
        randomloc = float(random.randint(0, 999999999)) / 1000000000
        newnode = (randomloc,)
        # Materialize the neighbor view before remove_node invalidates it
        # (graph.neighbors returns a lazy view/iterator in networkx >= 2).
        neighbors = list(graph.neighbors(n))
        graph.add_node(newnode, id=i, ds=DataStore(100000))
        for neighbor in neighbors:
            graph.add_edge(newnode, neighbor)
        graph.remove_node(n)
Exemplo n.º 15
0
	def test_tweetEnable(self):
		"""setTweetEnable round-trips through getTweetEnable for both flags."""
		store = DataStore()

		# True then False, matching the original assertion order.
		for flag in (True, False):
			store.setTweetEnable(flag)
			assert_equal(flag, store.getTweetEnable())
Exemplo n.º 16
0
def login(content):
    """Validate a login request against the DB.

    'password' and 'mod_hash' are optional in *content*; missing values are
    passed to DB.validate as False.
    """
    return DB.validate(username=content['username'],
                       password=content.get('password', False),
                       mod_hash=content.get('mod_hash', False))
Exemplo n.º 17
0
	def test_DeleteSettings(self):
		"""deleteSettings resets the stored mention id to its cleared value."""
		store = DataStore()
		store.setMentionId(100)

		store.deleteSettings()

		# The mention id must be back at the cleared value (0).
		assert_equal(0, store.getMentionId())
Exemplo n.º 18
0
    def do_parse(self, no, html):
        """Parse one fetched page and queue its unseen links.

        Feeds *html* through the structure parser (which fills the
        module-level links/title/infos accumulators), saves the article if
        one was found, then returns a '|'-joined "layer,url" message for
        the next crawl layer, or None when no new URLs were found.
        """
        ParseHTMLStructure().feed(html)
        # Keep only links that the data store has not seen before.
        fresh = [link for link in links
                 if not DataStore.isDuplicate(self.logger, link)]

        if len(title) > 0 and len(infos) > 0:
            DataStore.save(self.logger, title[0], infos)

        # Reset the shared accumulators for the next page.
        del infos[:]
        del links[:]
        del title[:]

        if not fresh:
            self.logger.info('no urls found')
            return None

        layer = str(no + 1)
        return '|'.join(layer + ',' + url for url in fresh)
Exemplo n.º 19
0
def SendGCMMessage(username, message):
    """Push *message* to *username* via GCM.

    Returns False when the user has no registration id, the GCM 'errors'
    payload on failure, or {'reg_id': [...]} on success.
    """
    gcm = GCM(GCM_API_KEY)
    gcm_id = DataStore.getPlayerGCMid(username)
    # BUG FIX: the original wrapped the id in a one-element list *before*
    # checking it, so "reg_id == False or len(reg_id) == 0" could never be
    # true and invalid ids were sent to GCM.  Check the raw id instead.
    if not gcm_id:
        # do nothing
        return False
    reg_id = [gcm_id]
    response = gcm.json_request(registration_ids=reg_id, data=message)

    # Handling errors
    if 'errors' in response:
        return response['errors']
    else:
        # do i need to save this reg_id?
        return {'reg_id': reg_id}
Exemplo n.º 20
0
def save_drug(content):
    """Persist a drug record built from the request payload.

    Every field listed below must be present in *content* (KeyError
    otherwise, as in the original).  Returns the DB status.
    """
    fields = (
        "mod_hash",
        "brand_name",
        "generic_name",
        "how_supplied_storage_prior",
        "reconstitution_concentration",
        "stability_post_reconstruction",
        "vehicle_dilution",
        "administration",
        "misc_notes",
        "references",
        "black_box",
    )
    return DB.save_drug(**{name: content[name] for name in fields})
Exemplo n.º 21
0
def unregisterDataStores(dataStorePaths):
    """Unregister each replicated data-store path; True only if all succeed.

    serverFQDN/serverPort/userName/passWord/useSSL are read from module
    scope (not visible in this block).
    """
    unregisterSuccessful = True
    print "\n---------------------------------------------------------------------------"
    print "- Unregister temporary 'replicated' set of data stores used for publishing..."
    print "---------------------------------------------------------------------------"

    for itemPath in dataStorePaths:
        print "\n\t" + itemPath
        success, response = DataStore.unregister(serverFQDN, serverPort, userName, passWord, itemPath, useSSL)
        if success:
            print "\tDone."
        else:
            # Keep going so every path is attempted; remember the failure.
            unregisterSuccessful = False
            print "ERROR:" + str(response)

    return unregisterSuccessful
Exemplo n.º 22
0
    def __init__(self, mentors=None, mentees=None, person_dict=None):
        """Set up the matcher, loading this round's data when none is given.

        When mentors, mentees and person_dict are all None, they are loaded
        from the data store; otherwise the provided values are used as-is.
        """
        self.ds = DataStore.DataStore()
        self.matching = None
        self.matching_list = None

        if mentors is None and mentees is None and person_dict is None:
            loaded = self.ds.load_data(
                "initial_data_{}".format(global_vars.ROUND))
            self.mentors, self.mentees, self.person_dict = loaded
        else:
            self.mentors = mentors
            self.mentees = mentees
            self.person_dict = person_dict

        self.assignment_matrix = None
        # Timestamp used for naming saved outputs.
        self.now = datetime.now()  # current date and time
        self.now_str = self.now.strftime("%Y%m%d_%H_%M_%S")
Exemplo n.º 23
0
 def init_DS_list(self):
     """
     Init a list of empty DSs (Data Stores == caches)
     """
     # One DataStore per id; all share the sizing and estimation settings
     # configured on this object (DS_size, bpe, windows, fpr/fnr caps, ...).
     self.DS_list = [
         DataStore.DataStore(ID=i,
                             size=self.DS_size,
                             bpe=self.bpe,
                             mr1_estimation_window=self.estimation_window,
                             max_fpr=self.max_fpr,
                             max_fnr=self.max_fnr,
                             verbose=self.verbose,
                             uInterval=self.uInterval,
                             num_of_insertions_between_estimations=self.
                             num_of_insertions_between_estimations)
         for i in range(self.num_of_DSs)
     ]
def unregisterDataStores(dataStorePaths):
    """Unregister each replicated data-store path; True only if all succeed.

    serverFQDN/serverPort/userName/passWord/useSSL are read from module
    scope (not visible in this block).
    """
    unregisterSuccessful = True
    print "\n---------------------------------------------------------------------------"
    print "- Unregister temporary 'replicated' set of data stores used for publishing..."
    print "---------------------------------------------------------------------------"

    for itemPath in dataStorePaths:
        print "\n\t" + itemPath
        success, response = DataStore.unregister(serverFQDN, serverPort,
                                                 userName, passWord, itemPath,
                                                 useSSL)
        if success:
            print "\tDone."
        else:
            # Keep going so every path is attempted; remember the failure.
            unregisterSuccessful = False
            print "ERROR:" + str(response)

    return unregisterSuccessful
    def GetBBCNewsTitles(self, BeebXML):
        """Parse a BBC RSS feed, run title tagging, and persist each entry."""
        Titles = ProcessNewsTitles.ProcessTitles()
        db = DataStore.MongoDBStore()
        db.Setup()
        # NOTE(review): this second assignment discards the instance created
        # two lines above — looks redundant; confirm before removing.
        Titles = ProcessNewsTitles.ProcessTitles()

        feed = feedparser.parse(BeebXML)
        for x in range(0, len(feed['entries'])):
            entry = feed['entries'][x]['title']
            print entry

            # Run the NLP pipeline on the title; results stay inside Titles.
            Titles.title = entry
            Titles.tokenize_title()
            Titles.position_tags()
            Titles.find_noun()
            Titles.find_verb()

            # Only the raw title text is stored.
            db.SaveRecord(entry)
Exemplo n.º 26
0
def main(argv):
    """Verify stored Merkle trees for every chain; exit 1 if any are bad."""
    logging.basicConfig(level=logging.DEBUG)
    args, argv = readconf.parse_argv(argv, DataStore.CONFIG_DEFAULTS,
                                     strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print "Usage: verify.py --dbtype=MODULE --connect-args=ARGS"
        return 0
    store = DataStore.new(args)
    logger = logging.getLogger("verify")
    checked, bad = 0, 0
    # Walk every chain in the database and verify its tx Merkle hashes.
    for (chain_id,) in store.selectall("""
        SELECT chain_id FROM chain"""):
        logger.info("checking chain %d", chain_id)
        checked1, bad1 = verify_tx_merkle_hashes(store, logger, chain_id)
        checked += checked1
        bad += bad1
    logger.info("All chains: %d Merkle trees, %d bad", checked, bad)
    # Exit status: 0 when nothing bad was found, 1 otherwise.
    return bad and 1
Exemplo n.º 27
0
	def test_DeleteStatus(self):
		"""removeStatus deletes a previously registered status."""
		store = DataStore()
		store.addStatus(Type.normal, u'abc')
		store.removeStatus(Type.normal, u'abc')

		# The removed status must no longer appear in the listing.
		found = any(row[1] == u'abc' for row in store.getStatuses(Type.normal))
		assert_false(found)
Exemplo n.º 28
0
def main(argv):
    """Load the first --count blocks from --blkfile out of order (test tool).

    Returns 0 on success; raises ValueError if --blkfile is missing.
    """
    conf = {
        "debug":                    None,
        "logging":                  None,
        "count":                    200,
        "seed":                     1,
        "blkfile":                  None,
        }
    # DataStore defaults take precedence over the placeholders above.
    conf.update(DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf,
                                     strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print ("""Usage: python -m Abe.mixup [-h] [--config=FILE] [--CONFIGVAR=VALUE]...

Load blocks out of order.

  --help                    Show this help message and exit.
  --config FILE             Read options from FILE.
  --count NUMBER            Load COUNT blocks.
  --blkfile FILE            Load the first COUNT blocks from FILE.
  --seed NUMBER             Random seed (not implemented; 0=file order).

All configuration variables may be given as command arguments.""")
        return 0

    if args.blkfile is None:
        raise ValueError("--blkfile is required.")

    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(message)s")
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)
    ds = BCDataStream.BCDataStream()
    # Map the block file into the data stream, then close the file handle;
    # the stream is still used afterwards.
    file = open(args.blkfile, "rb")
    ds.map_file(file, 0)
    file.close()
    mixup_blocks(store, ds, int(args.count), None, int(args.seed or 0))
    return 0
Exemplo n.º 29
0
def main(argv):
    """Verify stored Merkle trees for every chain; exit 1 if any are bad."""
    logging.basicConfig(level=logging.DEBUG)
    args, argv = readconf.parse_argv(argv,
                                     DataStore.CONFIG_DEFAULTS,
                                     strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print "Usage: verify.py --dbtype=MODULE --connect-args=ARGS"
        return 0
    store = DataStore.new(args)
    logger = logging.getLogger("verify")
    checked, bad = 0, 0
    # Walk every chain in the database and verify its tx Merkle hashes.
    for (chain_id, ) in store.selectall("""
        SELECT chain_id FROM chain"""):
        logger.info("checking chain %d", chain_id)
        checked1, bad1 = verify_tx_merkle_hashes(store, logger, chain_id)
        checked += checked1
        bad += bad1
    logger.info("All chains: %d Merkle trees, %d bad", checked, bad)
    # Exit status: 0 when nothing bad was found, 1 otherwise.
    return bad and 1
Exemplo n.º 30
0
def main(argv):
    """Load the first --count blocks from --blkfile out of order (test tool).

    Returns 0 on success; raises ValueError if --blkfile is missing.
    """
    # Merge: the local placeholders first, then DataStore's defaults, which
    # take precedence on any overlapping keys (same order as conf.update).
    conf = dict({"debug": None, "logging": None, "count": 200,
                 "seed": 1, "blkfile": None},
                **DataStore.CONFIG_DEFAULTS)

    args, argv = readconf.parse_argv(argv, conf, strict=False)
    if argv and argv[0] in ('-h', '--help'):
        print(
            """Usage: python -m Abe.mixup [-h] [--config=FILE] [--CONFIGVAR=VALUE]...

Load blocks out of order.

  --help                    Show this help message and exit.
  --config FILE             Read options from FILE.
  --count NUMBER            Load COUNT blocks.
  --blkfile FILE            Load the first COUNT blocks from FILE.
  --seed NUMBER             Random seed (not implemented; 0=file order).

All configuration variables may be given as command arguments.""")
        return 0

    if args.blkfile is None:
        raise ValueError("--blkfile is required.")

    logging.basicConfig(format="%(message)s",
                        level=logging.DEBUG,
                        stream=sys.stdout)
    if args.logging is not None:
        import logging.config as logging_config
        logging_config.dictConfig(args.logging)

    store = DataStore.new(args)
    stream = BCDataStream.BCDataStream()
    # Map the block file into the stream, closing the handle right after
    # map_file returns — the same open/map/close sequence as before.
    with open(args.blkfile, "rb") as blkfile:
        stream.map_file(blkfile, 0)
    mixup_blocks(store, stream, int(args.count), None, int(args.seed or 0))
    return 0
Exemplo n.º 31
0
    def init(self):
        """Parse config/argv, configure logging, and open the data store.

        Returns (store, remaining_argv); (None, []) when help was requested.
        """
        import DataStore, readconf, logging, sys
        self.conf.update({ "debug": None, "logging": None })
        # DataStore defaults take precedence over the placeholders above.
        self.conf.update(DataStore.CONFIG_DEFAULTS)

        args, argv = readconf.parse_argv(self.argv, self.conf, strict=False)
        if argv and argv[0] in ('-h', '--help'):
            print self.usage()
            return None, []

        # Plain messages to stdout; optional dictConfig may override.
        logging.basicConfig(
            stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
        if args.logging is not None:
            import logging.config as logging_config
            logging_config.dictConfig(args.logging)

        store = DataStore.new(args)

        return store, argv
Exemplo n.º 32
0
    # Fragment: the enclosing main(argv) definition starts before this chunk.
    try:
        opts, args = getopt.getopt(argv[1:], "hcv",
                                   ['help', 'catchup', 'verbose'])
    except getopt.error, msg:
        print >> sys.stderr, 'Error: %s\n' % msg
        print >> sys.stderr, __usage__.strip()
        return 1
    # process options
    catchup = False
    verbose = 0
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __usage__.strip()
            return 0
        elif o == '-c' or o == '--catchup':
            catchup = True
        elif o == '-v' or o == '--verbose':
            verbose += 1
    # check arguments
    if len(args) != 1:
        print >> sys.stderr, "Error: 1 argument required"
        print >> sys.stderr, __usage__.strip()
        return 2
    logger = ApplicationLogger(verbose)
    # The single positional argument is the weather-station data directory;
    # it seeds both the params store and the calibrated-data store.
    return ToMetOffice(DataStore.params(args[0]),
                       DataStore.calib_store(args[0])).Upload(catchup)


if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 33
0
def build_global_mhm(ensemble_matrix, target, score, alpha=1.0):
    """
    Function to create the global mhm measure
    :param ensemble_matrix: A numpy array of num_classifiers by num_classes by num_instances
    :param target: An array with the real class
    :param score: The score to use for classifier performance calculation
    :param alpha: Alpha parameter (paper)
    :return: A DataStore.DictDataStore mapping classifier subsets to measure values
    """
    num_classifiers, num_classes, num_instances = ensemble_matrix.shape
    confidences = np.empty((num_classifiers, ))
    # For additive measure
    additive_measure = DataStore.DictDataStore(num_classifiers)
    # Get callable score function
    if score == "acc":
        performance_function = accuracy
    elif score == "tpr":
        performance_function = tpr_mean
    elif score == "gm":
        performance_function = gm
    elif score == "f1":
        performance_function = get_f_measure_function(target)
    elif score == "auc":
        performance_function = get_auc_score_function(target)
    elif score == "ap":
        performance_function = get_ap_score_function(target)
    else:
        raise Exception(
            "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
    # For each individual classifier get its performance
    for i in range(num_classifiers):
        prob = ensemble_matrix[i, :, :]
        if score == "auc" or score == "ap":
            # Rank-based scores consume the probabilities directly.
            val = performance_function(target, prob.T)
        else:
            # Hard-label scores need an argmax prediction per instance.
            pred = np.argmax(prob, axis=0)
            val = performance_function(target, pred)
        confidences[i] = val

    # Calculate additive measure: singletons carry their confidence; larger
    # subsets are sums of their singleton values.
    for i in all_combs(range(num_classifiers)):
        if len(i) == 1:
            additive_measure.put(i, confidences[i[0]])
        else:
            v = 0.0
            for j in i:
                v += additive_measure.get((j, ))
            additive_measure.put(i, v)
    additive_measure.normalize()

    # Compute similarities and relative diversity
    similarities = compute_similarities(ensemble_matrix)
    relative_diversity = relative_diversity_dict(similarities)

    # Calculate the final measure: boost each subset's additive value by its
    # relative diversity, scaled by alpha.
    measure = DataStore.DictDataStore(num_classifiers)
    for i in all_combs(range(num_classifiers)):
        value = additive_measure.get(i) * (1 + alpha * relative_diversity[i])
        measure.put(i, value)

    # Diversity boosting can break monotonicity; repair it, then renormalize.
    measure.correct_monotonicity()
    measure.normalize()
    return measure
Exemplo n.º 34
0
        # Fragment: the enclosing main() and its try: line start before this chunk.
        opts, args = getopt.getopt(
            argv[1:], "hcv", ['help', 'catchup', 'verbose'])
    except getopt.error, msg:
        print >>sys.stderr, 'Error: %s\n' % msg
        print >>sys.stderr, __usage__.strip()
        return 1
    # process options
    catchup = False
    verbose = 0
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __usage__.strip()
            return 0
        elif o == '-c' or o == '--catchup':
            catchup = True
        elif o == '-v' or o == '--verbose':
            verbose += 1
    # check arguments
    if len(args) != 2:
        print >>sys.stderr, "Error: 2 arguments required"
        print >>sys.stderr, __usage__.strip()
        return 2
    logger = ApplicationLogger(verbose)
    # args[0]: weather-station data directory; args[1]: target service name.
    return ToService(
        DataStore.params(args[0]), DataStore.calib_store(args[0]),
        service_name=args[1]
        ).Upload(catchup)

if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 35
0
	def test_DeleteAllStatus(self):
		"""removeStatuses clears every stored status of every type."""
		store = DataStore()
		# Same insertion order as before: all normals, then all priorities.
		for kind in (Type.normal, Type.priority):
			for text in (u'abc', u'def', u'ghi'):
				store.addStatus(kind, text)

		store.removeStatuses()

		# The listing must be empty afterwards.
		assert_equal(0, len(store.getStatuses()))
Exemplo n.º 36
0
def build_measure(ensemble_matrix, target, score="acc"):
    """
    Build the CPM fuzzy measure from per-level classifier performance.

    :param ensemble_matrix: A numpy array of shape
        (num_classifiers, num_classes, num_instances) holding each
        classifier's per-class probabilities for every instance
    :param target: An array with the real class of each instance
    :param score: The score to use for classifier performance calculation:
        'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'
    :return: The measure as a DataStore.DictDataStore keyed by classifier
        subsets
    :raises ValueError: if ``score`` is not one of the supported identifiers
    """
    num_classifiers, num_classes, num_instances = ensemble_matrix.shape
    # Performance of every classifier subset, keyed by the subset tuple.
    performances = dict()
    # level_mean[k] accumulates [sum of scores, count] for subsets of size k.
    level_mean = dict()
    # Resolve the callable performance function once, up front.
    if score == "acc":
        performance_function = accuracy
    elif score == "tpr":
        performance_function = tpr_mean
    elif score == "gm":
        performance_function = gm
    elif score == "f1":
        performance_function = get_f_measure_function(target)
    elif score == "auc":
        performance_function = get_auc_score_function(target)
    elif score == "ap":
        performance_function = get_ap_score_function(target)
    else:
        raise ValueError(
            "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
    # Score every possible (non-empty) classifier combination.
    for i in all_combs(range(num_classifiers)):
        classifiers_prob = ensemble_matrix[i, :, :]
        # Average the class probabilities of the classifiers in the subset.
        prob = np.mean(classifiers_prob, axis=0)
        if score in ("auc", "ap"):
            # Ranking scores consume probabilities (instances x classes).
            val = performance_function(target, prob.T)
        else:
            # prob => num_classes, num_instances
            pred = np.argmax(prob, axis=0)
            val = performance_function(target, pred)
        # Record the performance and accumulate it for the level mean.
        performances[i] = val
        if len(i) not in level_mean:
            level_mean[len(i)] = [0.0, 0.0]
        level_mean[len(i)][0] += val
        level_mean[len(i)][1] += 1.0

    # Turn the accumulated [sum, count] pairs into per-level means.
    for k in level_mean.keys():
        level_mean[k] = level_mean[k][0] / level_mean[k][1]

    # Build the measure: each subset's value is its level baseline
    # (|subset| / num_classifiers) shifted by a bounded (tanh) amount that
    # reflects how far its performance deviates from the level mean.
    measure = DataStore.DictDataStore(num_classifiers)
    for i in all_combs(range(num_classifiers)):
        y = performances[i] - level_mean[len(i)]
        value = (float(len(i)) / float(num_classifiers)) + np.tanh(
            y * 100) / (2.0 * num_classifiers)
        measure.put(i, value)

    return measure
Exemplo n.º 37
0
def main(argv=None):
    # Entry point: upload a file to Twitter for a weather-station data dir.
    # Expects two positional arguments: the data directory and the file to
    # upload.  Exit status: 0 success, 1 bad option, 2 wrong argument
    # count, 3 upload failed.
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "h", ['help'])
    except getopt.error, msg:
        print >> sys.stderr, 'Error: %s\n' % msg
        print >> sys.stderr, __usage__.strip()
        return 1
    # process options
    for o, a in opts:
        if o in ('-h', '--help'):
            print __usage__.strip()
            return 0
    # check arguments
    if len(args) != 2:
        print >> sys.stderr, "Error: 2 arguments required"
        print >> sys.stderr, __usage__.strip()
        return 2
    logger = ApplicationLogger(1)
    # args[0] is the weather data directory, args[1] the file to upload.
    params = DataStore.params(args[0])
    Localisation.SetApplicationLanguage(params)
    if ToTwitter(params).UploadFile(args[1]):
        return 0
    return 3


if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 38
0
import ZDCatch, interpreter, DataStore

# Fetch the raw data, extract the "xieyihuoqi" records with the
# interpreter, and persist them through DataStore.
# NOTE(review): the semantics of catch_xyhuoqi's output are not visible
# here — confirm against the interpreter module.
data = ZDCatch.zd_catch()
xieyihuoqi = interpreter.catch_xyhuoqi(data)
DataStore.store_xyhq(xieyihuoqi)
Exemplo n.º 39
0
        print >>sys.stderr, 'Error: %s\n' % msg
        print >>sys.stderr, __usage__.strip()
        return 1
    # process options
    clear = False
    sync = None
    verbose = 0
    for o, a in opts:
        if o in ('-h', '--help'):
            print __usage__.strip()
            return 0
        elif o in ('-c', '--clear'):
            clear = True
        elif o in ('-s', '--sync'):
            sync = int(a)
        elif o in ('-v', '--verbose'):
            verbose += 1
    # check arguments
    if len(args) != 1:
        print >>sys.stderr, 'Error: 1 argument required\n'
        print >>sys.stderr, __usage__.strip()
        return 2
    logger = ApplicationLogger(verbose)
    root_dir = args[0]
    return LogData(
        DataStore.params(root_dir), DataStore.data_store(root_dir),
        sync=sync, clear=clear)

if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 40
0
        opts, args = getopt.getopt(argv[1:], "hcv",
                                   ['help', 'catchup', 'verbose'])
    except getopt.error, msg:
        print >> sys.stderr, 'Error: %s\n' % msg
        print >> sys.stderr, __usage__.strip()
        return 1
    # process options
    catchup = False
    verbose = 0
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __usage__.strip()
            return 0
        elif o == '-c' or o == '--catchup':
            catchup = True
        elif o == '-v' or o == '--verbose':
            verbose += 1
    # check arguments
    if len(args) != 2:
        print >> sys.stderr, "Error: 2 arguments required"
        print >> sys.stderr, __usage__.strip()
        return 2
    logger = ApplicationLogger(verbose)
    return ToService(DataStore.params(args[0]),
                     DataStore.calib_store(args[0]),
                     service_name=args[1]).Upload(catchup)


if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 41
0
def main(argv=None):
    # Entry point for the wind-rose plotting script.  Expects four
    # positional arguments; args[0] is the data directory, args[1] is
    # passed to the RosePlotter constructor and args[2]/args[3] to
    # DoPlot (presumably template and output paths — TODO confirm
    # against RosePlotter.DoPlot's signature).
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "h", ['help'])
    except getopt.error, msg:
        print >> sys.stderr, 'Error: %s\n' % msg
        print >> sys.stderr, __doc__.strip()
        return 1
    # process options
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __doc__.strip()
            return 0
    # check arguments
    if len(args) != 4:
        print >> sys.stderr, 'Error: 4 arguments required\n'
        print >> sys.stderr, __doc__.strip()
        return 2
    params = DataStore.params(args[0])
    Localisation.SetApplicationLanguage(params)
    # Plot from the calibrated, hourly, daily and monthly stores rooted
    # at the data directory.
    return RosePlotter(params, DataStore.calib_store(args[0]),
                       DataStore.hourly_store(args[0]),
                       DataStore.daily_store(args[0]),
                       DataStore.monthly_store(args[0]),
                       args[1]).DoPlot(args[2], args[3])


if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 42
0
def main(argv=None):
    # Entry point for the graph plotting script.  Expects four positional
    # arguments; args[0] is the data directory, args[1] is passed to the
    # GraphPlotter constructor and args[2]/args[3] to DoPlot (presumably
    # template and output paths — TODO confirm against GraphPlotter.DoPlot).
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "h", ['help'])
    except getopt.error, msg:
        print >> sys.stderr, 'Error: %s\n' % msg
        print >> sys.stderr, __doc__.strip()
        return 1
    # process options
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __doc__.strip()
            return 0
    # check arguments
    if len(args) != 4:
        print >> sys.stderr, 'Error: 4 arguments required\n'
        print >> sys.stderr, __doc__.strip()
        return 2
    params = DataStore.params(args[0])
    Localisation.SetApplicationLanguage(params)
    # Plot from the raw, hourly, daily and monthly stores rooted at the
    # data directory (unlike the rose plotter, this uses the raw
    # data_store rather than the calibrated store).
    return GraphPlotter(params, DataStore.data_store(args[0]),
                        DataStore.hourly_store(args[0]),
                        DataStore.daily_store(args[0]),
                        DataStore.monthly_store(args[0]),
                        args[1]).DoPlot(args[2], args[3])


if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 43
0
	def test_removeStatusesSelectType(self):
		"""removeStatuses(type) must clear only statuses of the given type."""
		store = DataStore()

		fixtures = (
			(Type.normal, (u'status1', u'status2', u'status3')),
			(Type.priority, (u'priority1', u'priority2', u'priority3')),
			(Type.omikuji, (u'omikuji1', u'omikuji2', u'omikuji3')),
		)
		for status_type, texts in fixtures:
			for text in texts:
				store.addStatus(status_type, text)

		store.removeStatuses(Type.normal)

		# Normal statuses are gone ...
		assert_equal(0, len(store.getStatuses(Type.normal)))

		# ... while priority and omikuji statuses survive, in order.
		for status_type, texts in fixtures[1:]:
			remaining = store.getStatuses(status_type)
			for idx, text in enumerate(texts):
				assert_true(remaining[idx][0] == status_type
							and remaining[idx][1] == text)
Exemplo n.º 44
0
	def test_removeStatuses(self):
		"""removeStatuses() without a type must clear statuses of every type."""
		store = DataStore()

		for status_type, texts in (
				(Type.normal, (u'status1', u'status2', u'status3')),
				(Type.priority, (u'priority1', u'priority2', u'priority3')),
				(Type.omikuji, (u'omikuji1', u'omikuji2', u'omikuji3'))):
			for text in texts:
				store.addStatus(status_type, text)

		store.removeStatuses()

		# Every per-type list must now be empty.
		for status_type in (Type.normal, Type.priority, Type.omikuji):
			assert_equal(0, len(store.getStatuses(status_type)))
Exemplo n.º 45
0
	def test_MentionId(self):
		"""A mention id stored with setMentionId is read back unchanged."""
		store = DataStore()
		store.setMentionId(100)

		# getMentionId must return exactly what was stored.
		assert_equal(100, store.getMentionId())
Exemplo n.º 46
0
def delete_user(content):
    """Delete a user through the DB backend.

    ``content`` must provide 'username', 'mod_hash' and 'user' keys.
    The backend's result is returned unchanged.
    """
    username = content['username']
    mod_hash = content['mod_hash']
    user = content['user']
    return DB.delete_user(username=username, mod_hash=mod_hash, user=user)
Exemplo n.º 47
0
def reset_password(content):
    """Reset a user's password through the DB backend.

    ``content`` must provide 'username', 'mod_hash' and 'user' keys.
    The backend's result is returned unchanged.
    """
    username = content['username']
    mod_hash = content['mod_hash']
    user = content['user']
    return DB.reset_password(username=username, mod_hash=mod_hash, user=user)
Exemplo n.º 48
0
        of.write('    </auto_update>\n')
        of.write('  </current_weather>\n')
        of.write('</response>\n')
        of.close()
def main(argv=None):
    # Entry point: write a YoWindow-format XML weather file.
    # Expects two positional arguments: the data directory and the output
    # file.  Exit status: 1 bad option, 2 wrong argument count, otherwise
    # whatever YoWindow.write_file returns.
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "hv", ['help', 'verbose'])
    except getopt.error, msg:
        print >>sys.stderr, 'Error: %s\n' % msg
        print >>sys.stderr, __doc__.strip()
        return 1
    # process options
    verbose = 0
    for o, a in opts:
        if o == '-h' or o == '--help':
            print __doc__.strip()
            return 0
        elif o == '-v' or o == '--verbose':
            verbose += 1
    # check arguments
    if len(args) != 2:
        print >>sys.stderr, "Error: 2 arguments required"
        print >>sys.stderr, __doc__.strip()
        return 2
    logger = ApplicationLogger(verbose)
    # Read calibrated data from args[0] and write the XML file to args[1].
    return YoWindow(DataStore.calib_store(args[0])).write_file(args[1])
if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 49
0
'''
Created on Aug 22, 2015

@author: robertbitel
'''
import DataStore

from LeagueInfo import MAFALeagueInfo, MAFALeagueInfo_Playoffs, SuhNommieNationLeagueInfo, FlexLeague, AutoLeagueInfo, CreateFullVarList, CreateFullOwnerList
from FileWriter import WriteByPosition, WriteFullList_NoOwner, WriteFullList_Owner

if __name__ == '__main__':
    data = DataStore.GetPlayerData()

    mafa_10_league = CreateFullVarList(10, data, MAFALeagueInfo())
    mafa_12_league = CreateFullVarList(12, data, MAFALeagueInfo())
    mafa_14_league = CreateFullVarList(14, data, MAFALeagueInfo())
    mafa_14_playoff_league = CreateFullVarList(14, data, MAFALeagueInfo_Playoffs())
    snn_12_league = CreateFullVarList(12, data, SuhNommieNationLeagueInfo())
    snn_12_league_myteam = CreateFullOwnerList('Strk', 12, data, SuhNommieNationLeagueInfo())
    flex_10_league = CreateFullVarList(10, data, FlexLeague())
    flex_12_league = CreateFullVarList(12, data, FlexLeague())
    AutoLeague_league = CreateFullVarList(12, data, AutoLeagueInfo())


    files_to_write = [  (mafa_10_league, './MAFA_10_VAR_List.csv',  WriteByPosition),
                        (mafa_12_league, './MAFA_12_VAR_List.csv',  WriteByPosition),
                        (mafa_14_league, './MAFA_14_VAR_List.csv',  WriteByPosition),
                        (mafa_14_playoff_league, './MAFA_14_VAR_PLAYOFF_List.csv',  WriteByPosition),
                        (mafa_14_league, './MAFA_14_VAR_Full_List.csv',  WriteFullList_NoOwner),
                        (snn_12_league, './SNN_12_VAR_List.csv',    WriteByPosition),
                        (snn_12_league, './SNN_12_VAR_Full_List.csv',    WriteFullList_Owner),
Exemplo n.º 50
0
    except getopt.error, msg:
        print >>sys.stderr, 'Error: %s\n' % msg
        print >>sys.stderr, __doc__.strip()
        return 1
    # process options
    for o, a in opts:
        if o in ('-h', '--help'):
            print __doc__.strip()
            return 0
    # check arguments
    if len(args) != 1:
        print >>sys.stderr, "Error: 1 argument required"
        print >>sys.stderr, __doc__.strip()
        return 2
    data_dir = args[0]
    params = DataStore.params(data_dir)
    Localisation.SetApplicationLanguage(params)
    hourly_data = DataStore.hourly_store(data_dir)
    idx = hourly_data.before(datetime.max)
    print 'Zambretti (current):', Zambretti(params, hourly_data[idx])
    idx = idx.replace(tzinfo=utc).astimezone(Local)
    if idx.hour < 9:
        idx -= timedelta(hours=24)
    idx = idx.replace(hour=9, minute=0, second=0)
    idx = hourly_data.nearest(idx.astimezone(utc).replace(tzinfo=None))
    print 'Zambretti  (at 9am):', Zambretti(params, hourly_data[idx])
    return 0

if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 51
0
def change_group(content):
    """Change a user's group through the DB backend.

    ``content`` must provide 'username', 'mod_hash', 'user' and 'group'
    keys.  The backend's result is returned unchanged.
    """
    username = content['username']
    mod_hash = content['mod_hash']
    user = content['user']
    group = content['group']
    return DB.change_group(username=username, mod_hash=mod_hash,
                           user=user, group=group)
Exemplo n.º 52
0
	def test_TweetCountup(self):
		"""tweetCountup increments a status's tweet count, and a repeated
		tweetCountAdjust leaves already-adjusted counts unchanged."""
		store = DataStore()

		for text in (u'status1', u'status2', u'status3'):
			store.addStatus(Type.normal, text)

		# Count up: status1 five times, status2 four, status3 three.
		for text, count in ((u'status1', 5), (u'status2', 4), (u'status3', 3)):
			for _ in range(count):
				store.tweetCountup(Type.normal, text)

		def check_counts(expected):
			statuses = store.getStatuses(Type.normal)
			for idx, (text, count) in enumerate(expected):
				assert_true(statuses[idx][1] == text
							and statuses[idx][3] == count)

		check_counts(((u'status1', 5), (u'status2', 4), (u'status3', 3)))

		# Adjust: counts drop by 3 here (apparently rebased so the lowest
		# becomes zero).
		store.tweetCountAdjust(Type.normal)
		check_counts(((u'status1', 2), (u'status2', 1), (u'status3', 0)))

		# A second adjust must be a no-op on these counts.
		store.tweetCountAdjust(Type.normal)
		check_counts(((u'status1', 2), (u'status2', 1), (u'status3', 0)))
Exemplo n.º 53
0
    def execute(self):
        """Train the RL trading agent with epsilon-greedy deep Q-learning.

        Builds training and testing data stores and trading environments,
        a UFCNN (or Atari-style) Keras model and an experience-replay
        buffer, then runs ``epoch`` training cycles.  After each epoch the
        model weights are saved: the best total-PnL model (or the current
        training model), plus the best randomless-PnL model.
        """
        # parameters
        epsilon = .5  # exploration
        epsilon_decay = 0.95
        epsilon_min = 0.1

        epoch = 4000  # is number of cycles...
        max_memory = 2000  #  NEEDS TO BE AS BIG AS AT LEAST 1 TRADING DAY!!!

        batch_size = 50  # 50
        sequence_length = 250  # 500
        discount = 0.95

        training_days = 1
        testing_days = 1

        # NOTE: the second assignment deliberately overrides the FULL
        # feature list with the SHORT one.
        features_list = list(range(1, 33))  ## FULL
        features_list = list(range(1, 6))  ## SHORT!!

        training_store = ds.DataStore(training_days=training_days,
                                      features_list=features_list,
                                      sequence_length=sequence_length)
        features_length = training_store.get_features_length()
        env = Trading(data_store=training_store,
                      sequence_length=sequence_length,
                      features_length=features_length)

        num_actions = env.get_action_count(
        )  # [sell, buy, flat] # get From TRADING!!

        mo = Models()
        rms = RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06)

        use_ufcnn = True
        if use_ufcnn:
            model = mo.model_ufcnn_concat(sequence_length=sequence_length,
                                          features=features_length,
                                          nb_filter=15,
                                          filter_length=5,
                                          output_dim=num_actions,
                                          optimizer=rms,
                                          loss='mse',
                                          batch_size=batch_size,
                                          init="normal")
            base_model_name = "ufcnn"
        else:
            model = mo.atari_conv_model(output_dim=num_actions,
                                        features=features_length,
                                        loss='mse',
                                        sequence_length=sequence_length,
                                        optimizer=rms,
                                        batch_size=batch_size,
                                        init="normal")
            base_model_name = "atari"

        # Test data is normalised with the training store's statistics.
        testing_store = ds.DataStore(training_days=training_days,
                                     testing_days=testing_days,
                                     features_list=features_list,
                                     sequence_length=sequence_length,
                                     mean=training_store.mean,
                                     std=training_store.std)

        test_env = Trading(data_store=testing_store,
                           sequence_length=sequence_length,
                           features_length=features_length)

        # If you want to continue training from a previous model, just uncomment the line bellow
        #mo.load_model("ufcnn_rl_training")

        # Initialize experience replay object

        start_time = time.time()
        best_pnl = -99999.
        best_rndless_pnl = -99999.

        exp_replay = ExperienceReplay(max_memory=max_memory,
                                      env=env,
                                      sequence_dim=(sequence_length,
                                                    features_length),
                                      discount=discount)
        lineindex = 0

        # Train
        for e in range(epoch):
            loss = 0.
            game_over = False

            total_reward = 0

            win_cnt = 0
            loss_cnt = 0
            random_cnt = 0
            no_random_cnt = 0

            ### loop over days-...
            for i in range(training_days):
                input_t = env.reset()

                j = 0
                while not game_over:  # game_over ... end of trading day...
                    input_tm1 = input_t
                    # get next action: epsilon-greedy exploration
                    if np.random.rand() <= epsilon:
                        action = np.random.randint(0, num_actions, size=1)[0]
                        random_cnt += 1
                    else:
                        q = model.predict(exp_replay.resize_input(input_tm1))
                        action = np.argmax(q[0])
                        no_random_cnt += 1

                    # apply action, get rewards and new state
                    input_t, reward, game_over, idays, lineindex = env.act(
                        action)

                    if reward > 0:
                        win_cnt += 1

                    if reward < 0:
                        loss_cnt += 1

                    total_reward += reward
                    # clip the stored reward to [-1, 1]; total_reward above
                    # keeps the unclipped sum for reporting
                    if reward > 1.:
                        reward = 1.

                    if reward < -1.:
                        reward = -1.

                    # store experience
                    exp_replay.remember([action, reward, idays, lineindex - 1],
                                        game_over)

                    # adapt model

                    if j > batch_size:  # do not run exp_rep if the store is empty...
                        inputs, targets = exp_replay.get_batch(
                            model, batch_size=batch_size)
                        curr_loss = model.train_on_batch(
                            exp_replay.resize_input(inputs), targets)
                        loss += curr_loss

                    j += 1

            rndless_pnl = self.get_randomless_pnl(test_env=test_env,
                                                  model=model,
                                                  testing_days=testing_days)

            secs = time.time() - start_time
            print(
                "Epoch {:05d}/{} | Time {:7.1f} | Loss {:11.4f} | Win trades {:5d} | Loss trades {:5d} | Total PnL {:8.2f} | Rndless PnL {:8.2f} | Eps {:.4f} | Rnd: {:5d}| No Rnd: {:5d}  "
                .format(e, epoch, secs, loss, win_cnt, loss_cnt, total_reward,
                        rndless_pnl, epsilon, random_cnt, no_random_cnt),
                flush=True)
            if epsilon > epsilon_min:
                epsilon *= epsilon_decay
            # Save trained model weights and architecture, this will be used by the visualization code

            if total_reward > best_pnl:
                mo.save_model(model, base_model_name + "_rl_best")
                best_pnl = total_reward
            else:
                mo.save_model(model, base_model_name + "_rl_training")

            # Bug fix: compare against the best *randomless* PnL
            # (best_rndless_pnl was tracked but the original comparison
            # used best_pnl, so the randomless-best model was mis-saved).
            if rndless_pnl > best_rndless_pnl:
                mo.save_model(model, base_model_name + "_rl_rndless_best")
                best_rndless_pnl = rndless_pnl
Exemplo n.º 54
0
if __name__ == "__main__":
    # Make sure this grid size matches the value used for training

    batch_size = 25  # 50
    sequence_length = 250  # 500

    # NOTE: the second assignment deliberately overrides the FULL feature
    # list with the SHORT one.
    features_list = list(range(1, 33))  ## FULL
    features_list = list(range(1, 6))  ## SHORT!!

    training_days = 1
    testing_days = 1
    max_memory = 500000

    # The testing store is normalised with the training store's statistics.
    training_store = ds.DataStore(training_days=training_days,
                                  features_list=features_list,
                                  sequence_length=sequence_length)
    testing_store = ds.DataStore(training_days=training_days,
                                 testing_days=testing_days,
                                 features_list=features_list,
                                 sequence_length=sequence_length,
                                 mean=training_store.mean,
                                 std=training_store.std)

    features_length = training_store.get_features_length()

    # Evaluation environment runs over the *testing* store.
    env = Trading(data_store=testing_store,
                  sequence_length=sequence_length,
                  features_length=features_length)
    num_actions = env.get_action_count(
    )  # [sell, buy, flat] # get From TRADING!!
Exemplo n.º 55
0
            OK = False
    ftp.close()
    if secure:
        transport.close()
    return OK
def main(argv=None):
    # Entry point: upload one or more files (FTP/SFTP, per the Upload
    # helper above).  args[0] is the data directory holding the upload
    # configuration; the remaining arguments are the files to send.
    # Exit status: 0 success, 1 bad option, 2 too few arguments,
    # 3 upload failed.
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "h", ['help'])
    except getopt.error, msg:
        print >>sys.stderr, 'Error: %s\n' % msg
        print >>sys.stderr, __doc__.strip()
        return 1
    # process options
    for o, a in opts:
        if o in ('-h', '--help'):
            print __doc__.strip()
            return 0
    # check arguments
    if len(args) < 2:
        print >>sys.stderr, "Error: at least 2 arguments required"
        print >>sys.stderr, __doc__.strip()
        return 2
    logger = ApplicationLogger(1)
    if Upload(DataStore.params(args[0]), args[1:]):
        return 0
    return 3
if __name__ == "__main__":
    sys.exit(main())
Exemplo n.º 56
0
def register(content):
    """Register a new user through the DB backend.

    ``content`` must provide 'username', 'password' and 'email' keys.
    The backend's result is returned unchanged.
    """
    username = content['username']
    password = content['password']
    email = content['email']
    return DB.register_user(username, password, email)
Exemplo n.º 57
0
    def __init__(self, ensemble_matrix, target, dynamic_measure_function,
                 score, integral):
        """

        :param ensemble_matrix: A numpy array of num_classifiers by num_classes by num_instances
        :param target: An array with the real class
        :param dynamic_measure_function: The function that returns the mesure values
        :param score: The score to use for classifier performance calculation
        :param integral: The integral to use (choquet or sugeno)
        """
        # Assertions
        assert integral.lower() in ["choquet", "sugeno"]
        assert score.lower() in ["acc", "tpr", "gm", "f1", "auc", "ap"]
        num_classifiers, num_classes, num_instances = ensemble_matrix.shape
        self.ensemble_matrix = ensemble_matrix
        self.target = target
        self.dynamic_measure_function = dynamic_measure_function
        self.integral = integral.lower()
        # Get callable score
        if score.lower() == "acc":
            performance_function = accuracy
        elif score.lower() == "tpr":
            performance_function = tpr_mean
        elif score == "gm":
            performance_function = gm
        elif score == "f1":
            performance_function = get_f_measure_function(target)
        elif score == "auc":
            performance_function = get_auc_score_function(target)
        elif score == "ap":
            performance_function = get_ap_score_function(target)
        else:
            raise Exception(
                "score must be 'acc', 'tpr', 'gm', 'f1', 'auc' or 'ap'")
        # Calculate the confidence of each classifier
        self.confidences = np.empty((num_classifiers, ))
        for i in range(num_classifiers):
            prob = ensemble_matrix[i, :, :]
            if score == "auc" or score == "ap":
                self.confidences[i] = performance_function(target, prob.T)
            else:
                pred = np.argmax(prob, axis=0)
                self.confidences[i] = performance_function(target, pred)
        # Calculate the similarities
        self.similarities = compute_similarities(ensemble_matrix)
        # If the dynamic function is mhm
        if self.dynamic_measure_function == dynamic_mhm:
            # Calculate the relative diversity
            self.relative_diversity = relative_diversity_dict(
                self.similarities)
            # Calculate the additive measure
            self.additive_measure = DataStore.DictDataStore(
                self.confidences.shape[0])
            for i in all_combs(range(self.confidences.shape[0])):
                if len(i) == 1:
                    self.additive_measure.put(i, self.confidences[i[0]])
                else:
                    v = 0.0
                    for j in i:
                        v += self.additive_measure.get((j, ))
                    self.additive_measure.put(i, v)
            self.additive_measure.normalize()