Example #1
def main(srl_method_name, evaluator_name, example_name, fold, seed, alpha,
         study, out_directory):
    """
    Driver for BOWLOS weight learning
    """

    # Initialize logging level, switch to DEBUG for more info.
    initLogging(logging_level=logging.INFO)

    logging.info("Performing BowlOS on {}:{}:{}".format(
        srl_method_name, evaluator_name, example_name))

    # model specific parameters
    num_weights = HELPER_METHODS[srl_method_name]['get_num_weights'](
        example_name)
    predicate = EVAL_PREDICATE[example_name]

    logging.info("Optimizing over {} weights".format(num_weights))

    # the dataframes we will be using for evaluation
    truth_df = load_truth_frame(example_name, fold, predicate, 'learn')
    observed_df = load_observed_frame(example_name, fold, predicate, 'learn')
    target_df = load_target_frame(example_name, fold, predicate, 'learn')

    get_function_value = write_get_function_value_fun(
        srl_method_name, example_name, fold, seed, evaluator_name,
        out_directory, study, truth_df, observed_df, target_df)

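    # get_function_value is treated as a black-box objective by doLearn:
    # judging by how it is constructed above, it runs inference with a
    # candidate weight vector and returns the evaluator's score on the
    # learn split (an inference from usage, not stated in the source)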
    best_weights = doLearn(num_weights, seed, get_function_value, alpha)

    HELPER_METHODS[srl_method_name]['write_learned_weights'](best_weights,
                                                             example_name)
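
For orientation, a hypothetical invocation of this driver; the argument values below are illustrative only and not taken from the source:

# hypothetical call with illustrative values
main(srl_method_name='psl', evaluator_name='F1', example_name='citeseer',
     fold=0, seed=4, alpha=0.05, study='performance_study',
     out_directory='results')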
Example #2
def initLogging(self):
    if len(sys.argv) > 1:
        # testdriver <module>
        identifier = 'test_%s' % sys.argv[1]
    else:
        # testsuite
        identifier = 'testsuite'

    log.initLogging(self.getLogDir(), identifier)
Example #3
def main(tuffy_dir, psl_dir, experiment):
    # Initialize logging level, switch to DEBUG for more info.
    log.initLogging(logging_level=logging.INFO)

    logging.info("Working on experiment %s" % (experiment))

    tuffy_experiment = os.path.join(tuffy_dir, experiment)
    psl_experiment = os.path.join(psl_dir, experiment)

    helper = load_helper(os.path.join(tuffy_experiment, HELPER))

    if experiment not in os.listdir(os.path.join(psl_experiment, DATA)):
        logging.error("No data directory named %s in %s" %
                      (experiment, os.path.join(psl_experiment, DATA)))
        return

    for split_dir in os.listdir(os.path.join(psl_experiment, DATA,
                                             experiment)):
        if not os.path.isdir(
                os.path.join(psl_experiment, DATA, experiment, split_dir)):
            continue

        for phase in [EVAL, LEARN]:
            p_split = os.path.join(psl_experiment, DATA, experiment, split_dir,
                                   phase)
            t_split = os.path.join(tuffy_experiment, DATA, experiment,
                                   split_dir, phase)
            data = []
            if not os.path.isdir(p_split):
                logging.error("No eval/learn in %s" % (os.path.join(
                    psl_experiment, DATA, experiment, p_split_dir)))
                continue

            for predicate in helper:
                split_data, predicate_data = load_split(predicate, p_split)
                data = data + split_data

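                # target predicates get their atoms written to Tuffy's
                # query file; all split data is flushed via write_data below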
                if predicate[H_TARGET] == TRUE:
                    with open(os.path.join(t_split, QUERY_FILE),
                              'w') as out_file:
                        out_file.write('\n'.join(predicate_data))

            write_data(data, t_split)
Example #4
def main():
    # Initialize logging level, switch to DEBUG for more info.
    log.initLogging(logging_level=logging.DEBUG)

    # Load in the config options and set the seed
    config = load_config()
    if config['seed'] is not None:
        random.seed(config['seed'])

    # Run in text mode or graphics mode
    if config['use_text']:
        item_list = load_text(os.path.join(TEXT_DIR, config['text_filename']))
        try:
            shuffled_items = METHODS[config['method']](item_list,
                                                       config['num_items'])
            print_items(shuffled_items, config['row_size'])
        except KeyError:
            logging.warning("Method \"%s\" not supported." %
                            (config['method']))
    else:
        logging.warning("Graphics currently not supported.")
Example #5
# -*- coding: utf-8 -*-
import json
import time
import requests
from Signature import *
import sys
sys.path.append('..')
import log
import logging
log.initLogging('/data/rx/log')
sign = Signature('2', '123', '5321e33f2819487e99a6d52f92dc7cd7',
                 str(int(time.time())))
token = sign.create_token_base64()
decode = sign.decode_base64(token)
contentType = 'application/x-www-form-urlencoded'  # form data is encoded into the HTTP body and sent to the server, e.g. name=test&gender=male&[email protected]
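# the Authorization header below carries the bearer token on every request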
header = {'Content-Type': contentType, 'Authorization': 'Bearer %s' % token}


def delete_yidian_ad(feedyidianId):
    body = {
        "biz": 2,
        "ownerId": 1000001603,
        "adId": feedyidianId,
    }
    try:
        url = 'http://183.131.22.111/v1/ad/delete'
        resp = requests.post(url, data=body, headers=header)
        if resp.status_code != 200:
            logging.error('delete_yidian_ad_fail')
            logging.info('delete_yidian_ad_code:' + str(resp.status_code))
            logging.info(resp.content)
        resp.close()
    except requests.RequestException as err:
        # assumption: callers only need a log record, not a raised exception
        logging.error('delete_yidian_ad_exception: %s' % err)
Example #6
def main(srl_method_name, evaluator_name, example_name, fold, seed, alpha, study, out_directory):
    """
    Driver for RGS weight learning
    """
    # path to this file relative to caller
    dirname = os.path.dirname(__file__)

    # Initialize logging level, switch to DEBUG for more info.
    initLogging(logging_level=logging.INFO)

    logging.info("Performing RGS on {}:{}:{}".format(srl_method_name, evaluator_name, example_name))

    # the same grid as the default psl core implementation of RGS
    grid = GRID[srl_method_name]

    # the same number of iterations as the default psl RGS for this experiment
    n = 50

    # model specific parameters
    num_weights = HELPER_METHODS[srl_method_name]['get_num_weights'](example_name)
    predicate = EVAL_PREDICATE[example_name]

    # the dataframe we will be using as ground truth for this process
    truth_df = load_truth_frame(example_name, fold, predicate, 'learn')
    observed_df = load_observed_frame(example_name, fold, predicate, 'learn')
    target_df = load_target_frame(example_name, fold, predicate, 'learn')

    # initial state
    if IS_HIGHER_REP_BETTER[evaluator_name]:
        best_performance = -np.inf
    else:
        best_performance = np.inf
    best_weights = np.zeros(num_weights)
    np.random.seed(int(seed))

    for i in range(n):
        logging.info("Iteration {}".format(i))

        # obtain a random weight configuration for the model
        weights = np.random.choice(grid, num_weights)
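        # np.random.choice draws each of the num_weights entries
        # independently, with replacement, from the shared grid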
        logging.info("Trying Configuration: {}".format(weights))

        # assign weight configuration to the model file
        HELPER_METHODS[srl_method_name]['write_learned_weights'](weights, example_name)

        # perform inference
        # TODO: psl file structure does not fit this pattern: wrapper_learn
        process = subprocess.Popen('cd {}/../{}_scripts; ./run_inference.sh {} {} {} {} {} {}'.format(
            dirname, srl_method_name, example_name, 'RGS', 'wrapper_learn', fold, evaluator_name, out_directory),
            shell=True)
        process.wait()

        # fetch results
        if study == "robustness_study":
            predicted_df = HELPER_METHODS[srl_method_name]['load_prediction_frame'](example_name, 'RGS', evaluator_name,
                                                                                    seed, predicate, study, "learn", alpha)
        else:
            predicted_df = HELPER_METHODS[srl_method_name]['load_prediction_frame'](example_name, 'RGS', evaluator_name,
                                                                                    fold, predicate, study, "learn", alpha)

        performance = EVALUATE_METHOD[evaluator_name](predicted_df, truth_df, observed_df, target_df)

        logging.info("Configuration Performance: {}: {}".format(evaluator_name, performance))

        # update best weight configuration if improved
        if IS_HIGHER_REP_BETTER[evaluator_name]:
            if performance > best_performance:
                best_performance = performance
                best_weights = weights
        else:
            if performance < best_performance:
                best_performance = performance
                best_weights = weights

    # assign best weight configuration to the model file
    HELPER_METHODS[srl_method_name]['write_learned_weights'](best_weights, example_name)
Example #7
def main(srl_method_name, evaluator_name, example_name, fold, seed, alpha,
         study, out_directory):
    """
    Driver for CRGS weight learning
    :param srl_method_name:
    :param evaluator_name:
    :param example_name:
    :param fold:
    :param seed:
    :param alpha:
    :param study:
    :param out_directory:
    :return:
    """
    # path to this file relative to caller
    dirname = os.path.dirname(__file__)

    # Initialize logging level, switch to DEBUG for more info.
    initLogging(logging_level=logging.INFO)

    logging.info("Performing CRGS on {}:{}:{}".format(srl_method_name,
                                                      evaluator_name,
                                                      example_name))

    # the number of samples
    n = NUM_SAMPLES

    # the defaults from the PSL core code, recentered for Tuffy to allow negative weights
    weight_mean = MEAN[srl_method_name]
    variance = 0.20

    # model specific parameters
    num_weights = HELPER_METHODS[srl_method_name]['get_num_weights'](
        example_name)
    predicate = EVAL_PREDICATE[example_name]

    # parameters for sampling distribution
    mean_vector = np.array([weight_mean] * num_weights)
    variance_matrix = np.eye(num_weights) * variance

    logging.info("Optimizing over {} weights".format(num_weights))

    # the dataframes we will be using for evaluation
    truth_df = load_truth_frame(example_name, fold, predicate, 'learn')
    observed_df = load_observed_frame(example_name, fold, predicate, 'learn')
    target_df = load_target_frame(example_name, fold, predicate, 'learn')

    # initial state
    if IS_HIGHER_REP_BETTER[evaluator_name]:
        best_performance = -np.inf
    else:
        best_performance = np.inf
    best_weights = np.zeros(num_weights)
    logging.info("Setting seed {}".format(seed))
    np.random.seed(int(seed))

    for i in range(n):
        logging.info("Iteration {}".format(i))

        # obtain a random weight configuration for the model
        # sample from dirichlet and randomly set the orthant
        weights = np.random.dirichlet(
            (np.ones(num_weights) * alpha)) * np.random.choice([-1, 1],
                                                               num_weights)
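        # a Dirichlet draw is non-negative and sums to 1, so after the
        # elementwise sign flip the configuration lies on the unit L1
        # sphere in a randomly chosen orthant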
        logging.info("Trying Configuration: {}".format(weights))

        # assign weight configuration to the model file
        HELPER_METHODS[srl_method_name]['write_learned_weights'](weights,
                                                                 example_name)

        # perform inference
        # TODO: (Charles.) psl file structure needs to fit this pattern: wrapper_learn
        logging.info("writing to {}".format(out_directory))
        process = subprocess.Popen(
            'cd {}/../{}_scripts; ./run_inference.sh {} {} {} {} {}'.format(
                dirname, srl_method_name, example_name, 'wrapper_learn', fold,
                evaluator_name, out_directory),
            shell=True)
        logging.info("Waiting for inference")
        process.wait()

        # fetch results
        if study == "robustness_study":
            predicted_df = HELPER_METHODS[srl_method_name][
                'load_prediction_frame'](example_name, 'CRGS', evaluator_name,
                                         seed, predicate, study, alpha)
        else:
            predicted_df = HELPER_METHODS[srl_method_name][
                'load_prediction_frame'](example_name, 'CRGS', evaluator_name,
                                         fold, predicate, study, alpha)
        performance = EVALUATE_METHOD[evaluator_name](predicted_df, truth_df,
                                                      observed_df, target_df)

        logging.info("Configuration Performance: {}: {}".format(
            evaluator_name, performance))

        # update best weight configuration if improved
        if IS_HIGHER_REP_BETTER[evaluator_name]:
            if performance > best_performance:
                best_performance = performance
                best_weights = weights
        else:
            if performance < best_performance:
                best_performance = performance
                best_weights = weights

    # assign best weight configuration to the model file
    HELPER_METHODS[srl_method_name]['write_learned_weights'](best_weights,
                                                             example_name)
Example #8
"""

import sys
import os
import pandas as pd
import re
import logging

# Adds higher directory to python modules path.
sys.path.append("..")

from helpers import load_file
from log import initLogging

# Initialize logging level, switch to DEBUG for more info.
initLogging(logging_level=logging.INFO)

TUFFY_EXAMPLES_PATH = '../../tuffy-examples'


def _get_example_directory(example_name):
    # path to this file relative to caller
    dirname = os.path.dirname(__file__)

    # tuffy example directory relative to this directory
    example_directory = os.path.join(dirname, TUFFY_EXAMPLES_PATH,
                                     example_name)

    return example_directory

Example #9
sdk_material_path = None 
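# walk up the directory tree until sdk/python/ants.py is found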
while True:
    if not sdk_material_path:
        sdk_material_path = os.path.realpath('.')
    else:
        sdk_material_path = os.path.join(sdk_material_path, '..')
    ant_path = os.path.join(sdk_material_path, 'sdk', 'python', 'ants.py')
    if os.path.isfile(ant_path):
        break
sys.path.append(os.path.split(ant_path)[0])

from random import shuffle
from ants import *

import log, logging
log.initLogging()
logger = logging.getLogger('main')

class Scratch:
    def do_turn(self, ants):
        logger.info('scratch bot turn {0}'.format(ants.my_ants()))

        destinations = []
        for a_row, a_col in ants.my_ants():
            # try all directions randomly until one is passable and not occupied
            directions = list(AIM.keys())
            shuffle(directions)
            for direction in directions:
                (n_row, n_col) = ants.destination(a_row, a_col, direction)
                if (not (n_row, n_col) in destinations and
                        ants.passable(n_row, n_col)):
                    # take the first passable square not already claimed
                    # this turn (the standard ants starter-bot move)
                    ants.issue_order((a_row, a_col, direction))
                    destinations.append((n_row, n_col))
                    break
            else:
                destinations.append((a_row, a_col))
Example #10
File: rwt.py Project: liuyang1/test
#! /usr/bin/env python
"""
read and write test for Storage System
liuyang1,mtime: 2012-12-04 13:18:11 
"""

import os
from random import randint
import log

g_path = "/home/liuy/video/"
g_logger = log.initLogging("RWT")


def copy(filename):
    cmd = "cp " + g_path + filename + " /tmp/"
    os.popen(cmd)
    g_logger.info(cmd)
    return "/tmp/" + os.path.basename(filename)


def remove(filename):
    cmd = "rm " + filename
    os.popen(cmd)
    g_logger.info(cmd)


def rand(filelist):
    return filelist[randint(0, len(filelist) - 1)]


def getFileList(filepath):
    return os.listdir(filepath)
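
The module ends before any driver logic; a minimal hypothetical sketch of how the helpers above could be combined, not part of the original file:

# hypothetical driver: copy a random file from g_path to /tmp, then remove it
if __name__ == "__main__":
    files = getFileList(g_path)
    tmp_file = copy(rand(files))
    remove(tmp_file)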
Example #11
def main(psl_to_tuffy_helper_dir, tuffy_experiment_dir, psl_experiment_dir, experiment):
    """
    Procedure for translating data in psl format to Tuffy format.
    """
    # Initialize logging level, switch to DEBUG for more info.
    initLogging(logging_level=logging.INFO)

    logging.info("Working on experiment %s" % experiment)

    # save experiment paths for psl_to_tuffy_examples and psl
    psl_to_tuffy_helper_experiment_path = os.path.join(psl_to_tuffy_helper_dir, experiment)
    tuffy_experiment_path = os.path.join(tuffy_experiment_dir, experiment)
    psl_experiment_path = os.path.join(psl_experiment_dir, experiment)

    predicate_properties = load_predicate_properties(os.path.join(psl_to_tuffy_helper_experiment_path, PREDICATES_FILE))

    # ensure that data set exists in the psl_experiment_path
    if experiment not in os.listdir(os.path.join(psl_experiment_path, DATA)):
        # data has not been fetched yet
        logging.info("%s data has not been fetched yet. Fetching ..." % experiment)

        # go to psl experiment directory and run fetch data script
        cwd = os.getcwd()
        os.chdir(os.path.join(psl_experiment_path, DATA))
        os.system(os.path.join(psl_experiment_path, DATA) + '/fetchData.sh')
        os.chdir(cwd)

    for split_dir in os.listdir(os.path.join(psl_experiment_path, DATA, experiment)):
        for phase in [EVAL, BUILT_IN_LEARN, WRAPPER_LEARN]:
            logging.info("Translating %s PSL to Tuffy ..." % (experiment + ':' + split_dir + ':' + phase))

            if phase == EVAL:
                psl_phase = EVAL
            else:
                psl_phase = LEARN

            psl_split_path = os.path.join(psl_experiment_path, DATA, experiment, split_dir, psl_phase)
            tuffy_split_path = os.path.join(tuffy_experiment_path, DATA, experiment, split_dir, phase)
            evidence_data = []
            query_data = []

            if not os.path.isdir(psl_split_path):
                logging.error("No eval/learn in %s" % (os.path.join(psl_experiment_path, DATA, experiment, psl_phase)))
                continue

            for predicate in predicate_properties:
                split_data, predicate_data = load_split(predicate, psl_split_path)

                if predicate[H_TARGET] == TRUE:
                    # if the predicate is a target, then it should be in the Tuffy query file
                    query_data = query_data + predicate_data
                elif predicate[H_TRUTH] == TRUE:
                    if phase == BUILT_IN_LEARN:
                        # if the predicate is a truth and this is a built-in
                        # learn trial, then it belongs in the Tuffy evidence
                        evidence_data = evidence_data + split_data
                else:
                    # if the predicate is neither truth nor target, it is
                    # evidence and belongs in the Tuffy evidence file
                    evidence_data = evidence_data + split_data

            write_data(query_data, tuffy_split_path, QUERY_FILE)
            write_data(evidence_data, tuffy_split_path, EVIDENCE_FILE)
Example #12
def main(srl_method_name, evaluator_name, example_name, fold, seed, alpha,
         study, out_directory):
    """
    Driver for HB weight learning
    :param srl_method_name:
    :param evaluator_name:
    :param example_name:
    :param fold:
    :param seed:
    :param alpha:
    :param study:
    :param out_directory:
    :return:
    """
    # path to this file relative to caller
    dirname = os.path.dirname(__file__)

    # Initialize logging level, switch to DEBUG for more info.
    initLogging(logging_level=logging.INFO)

    logging.info("Performing Hyperband on {}:{}:{}".format(
        srl_method_name, evaluator_name, example_name))

    # model specific parameters
    num_weights = HELPER_METHODS[srl_method_name]['get_num_weights'](
        example_name)
    predicate = EVAL_PREDICATE[example_name]

    logging.info("Optimizing over {} weights".format(num_weights))

    # the dataframes we will be using for evaluation
    truth_df = load_truth_frame(example_name, fold, predicate, 'learn')
    observed_df = load_observed_frame(example_name, fold, predicate, 'learn')
    target_df = load_target_frame(example_name, fold, predicate, 'learn')

    # initial state
    np.random.seed(int(seed))

    def get_random_configuration():
        weights = np.random.dirichlet(
            (np.ones(num_weights) * alpha)) * np.random.choice([-1, 1],
                                                               num_weights)
        return weights

    def run_then_return_val_loss(num_iters, weights):
        # assign weight configuration to the model file
        HELPER_METHODS[srl_method_name]['write_learned_weights'](weights,
                                                                 example_name)

        # extra options to set max number of iterations
        extra_options = MAX_ITER_OPTION[srl_method_name] + str(
            int(np.ceil(num_iters)))

        # perform inference
        # TODO: (Charles.) psl file structure needs to fit this pattern if we want to use this wrapper : wrapper_learn
        process = subprocess.Popen(
            'cd {}/../{}_scripts; ./run_inference.sh {} {} {} {} {} {}'.format(
                dirname, srl_method_name, example_name, 'wrapper_learn', fold,
                evaluator_name, out_directory, extra_options),
            shell=True)
        process.wait()

        # fetch results
        if study == "robustness_study":
            predicted_df = HELPER_METHODS[srl_method_name][
                'load_prediction_frame'](example_name, 'HB', evaluator_name,
                                         seed, predicate, study, alpha)
        else:
            predicted_df = HELPER_METHODS[srl_method_name][
                'load_prediction_frame'](example_name, 'HB', evaluator_name,
                                         fold, predicate, study, alpha)

        # return negative if we are maximizing performance else positive
        if IS_HIGHER_REP_BETTER[evaluator_name]:
            return -EVALUATE_METHOD[evaluator_name](predicted_df, truth_df,
                                                    observed_df, target_df)
        else:
            return EVALUATE_METHOD[evaluator_name](predicted_df, truth_df,
                                                   observed_df, target_df)

    # maximum iterations/epochs per configuration
    max_iter = MAX_ITER_DEFAULT[srl_method_name]
    eta = SURVIVAL_DEFAULT  # defines downsampling rate (default=4)
    logeta = lambda x: np.log(x) / np.log(eta)
    # number of unique executions of Successive Halving (minus one)
    s_max = int(logeta(max_iter))
    # total number of iterations (without reuse) per execution of
    # Successive Halving (n,r)
    B = (s_max + 1) * max_iter
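
    # worked example of the bracket arithmetic, assuming max_iter = 81 and
    # eta = 4: s_max = int(log(81)/log(4)) = 3 and B = (3 + 1) * 81 = 324,
    # so the outer loop runs brackets s = 3, 2, 1, 0, trading many short
    # runs (large n, small r) for a few long runs (small n, large r)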

    # initialize
    best_val = np.inf
    best_weights = np.zeros(num_weights)

    # Begin Finite Horizon Hyperband outerloop.
    for s in reversed(range(s_max + 1)):
        # initial number of configurations
        n = int(np.ceil(int(B / max_iter / (s + 1)) * eta**s))
        # initial number of iterations to run configurations for
        r = max_iter * eta**(-s)

        # Begin Finite Horizon Successive Halving with (n,r)
        T = [get_random_configuration() for _ in range(n)]
        val_losses = []
        total_iter = 0
        for i in range(s + 1):
            # Run each of the n_i configs for r_i iterations and keep best n_i/eta
            n_i = n * eta**(-i)
            r_i = r * eta**(i)
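            # each halving round keeps roughly the top 1/eta of the
            # configurations and gives each survivor eta times as many
            # iterations as the previous round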
            total_iter = total_iter + r_i
            # the standard algorithm will only run r_i iterations, but this is a continuation of the optimization
            # we are starting over, so to get the same effect we need to run the total number of iterations over again.
            # Note: Very inefficient
            val_losses = [
                run_then_return_val_loss(num_iters=total_iter, weights=t)
                for t in T
            ]
            T = [
                T[i] for i in np.argsort(val_losses)[0:int(np.ceil(n_i / eta))]
            ]
            logging.info(
                "Successive halving: (n,r) = ({}, {}) Bracket winners: Configs: {} Vals: {}"
                .format(n_i, r_i, T,
                        np.sort(val_losses)[0:int(np.ceil(n_i / eta))]))

        tournament_winning_val = min(val_losses)
        logging.info(
            "Hyperband outerloop: (s) = ({}) Tournament winner: Config: {} Val: {}"
            .format(s, T, tournament_winning_val))
        if tournament_winning_val < best_val:
            best_val = tournament_winning_val
            best_weights = T[0]

    # assign best weight configuration to the model file
    HELPER_METHODS[srl_method_name]['write_learned_weights'](best_weights,
                                                             example_name)
Example #13
def initLogging(self):
    log.initLogging(self.getLogDir(), self.toolName)