Code example #1
def main():
    parser, args = build_parser()

    if not valid_env_vars():
        parser.print_help()
        exit(1)

    print(f"Pulling Pluralsight Reporting data starting from {args.start_date} "
          f"and ending at {args.end_date}")

    if args.users:
        print("Pulling users")
        get_users(args.output_folder)

    if args.course_completion:
        print("Pulling course completion")
        get_course_completion(args.start_date, args.end_date, args.output_folder)

    if args.course_usage:
        print("Pulling course usage")
        get_course_usage(args.start_date, args.end_date, args.output_folder)
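Every example on this page calls a project-local build_parser() helper. For orientation, here is a minimal sketch of what the helper behind example #1 might look like, assuming argparse; the flag names come from the example itself, while defaults and help text are guesses:

import argparse

def build_parser():
    # Hypothetical reconstruction: example #1 unpacks (parser, args),
    # so this sketch returns both. Details beyond the flag names are assumed.
    parser = argparse.ArgumentParser(
        description='Pull Pluralsight Reporting data')
    parser.add_argument('--start-date', dest='start_date')
    parser.add_argument('--end-date', dest='end_date')
    parser.add_argument('--output-folder', dest='output_folder', default='.')
    parser.add_argument('--users', action='store_true')
    parser.add_argument('--course-completion', dest='course_completion', action='store_true')
    parser.add_argument('--course-usage', dest='course_usage', action='store_true')
    return parser, parser.parse_args()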
Code example #2
    def __init__(self, dataset: BaseQA, char_vocab=0, pos_vocab=0):
        tf.set_random_seed(4242)
        self.parser = build_parser()
        self.char_vocab = char_vocab
        self.pos_vocab = pos_vocab
        self.graph = tf.Graph()
        self.args = self.parser.parse_args()
        self.imap = {}
        self.inspect_op = []
        self.feat_prop = None

        self._train_set = None
        self._test_set = None
        self._dev_set = None

        self.word_to_index, self.index_to_embedding = dataset.word_to_index, dataset.index_to_embedding
        self.index_to_word = {val: k for k, val in self.word_to_index.items()}
        self.vocab_size = len(self.index_to_embedding)

        if self.args.init_type == 'xavier':
            self.initializer = tf.contrib.layers.xavier_initializer()
        elif self.args.init_type == 'normal':
            self.initializer = tf.random_normal_initializer()
        elif self.args.init_type == 'uniform':
            self.initializer = tf.random_uniform_initializer(
                maxval=self.args.init, minval=-self.args.init)
        else:
            # Fall back to Xavier for any unrecognized init_type
            self.initializer = tf.contrib.layers.xavier_initializer()

        self.build_graph()
        _config_proto = tf.ConfigProto(allow_soft_placement=True,
                                       intra_op_parallelism_threads=8)
        self.sess = tf.Session(graph=self.graph, config=_config_proto)

        with self.graph.as_default():
            self.sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver()

        self.ckpt_path = os.path.join(self.args.ckpt_path, self.__class__.__name__)  # bug fix: bare `args` is undefined here; use self.args
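The initializer branch above dispatches on self.args.init_type and self.args.init. A hedged sketch of how those two options might be declared in this project's build_parser(), assuming argparse (defaults and help strings are assumptions):

import argparse

def build_parser():
    # Illustrative only: the option names come from the example above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--init_type', default='xavier',
                        choices=['xavier', 'normal', 'uniform'],
                        help='Weight initialization scheme')
    parser.add_argument('--init', type=float, default=0.05,
                        help='Half-range for the uniform initializer')
    return parser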
Code example #3
File: test.py Project: karush17/esac
# Pin the process to CPU 0, then hand it back every CPU via taskset
os.sched_setaffinity(os.getpid(), {0})
os.system("taskset -p 0xffffffffffffffffffffffff %d" % os.getpid())

if __name__ == "__main__":
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    checkpoint_name = './Checkpoint/'

    if not os.path.exists(checkpoint_name):
        os.makedirs(checkpoint_name)

    ######################################### INITIALIZE SETUP #############################################

    # Parse Arguments
    parser = build_parser()
    args = parser.parse_args()

    # Environment
    env = gym.make(args.env)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    env.seed(args.seed)
    STATE_DIM = env.observation_space.shape[0]
    ACTION_DIM = env.action_space
    NUM_WINNERS = int(args.elite_rate * args.pop)
    BATCH_SIZE = args.pop
    STD_NOISE = args.mutation
    NUM_GRAD_MODELS = args.grad_models
    l1_loss = torch.nn.SmoothL1Loss()
    total_start_time = time.time()
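The two os calls at the top of this example first pin the process to CPU 0 with os.sched_setaffinity and then shell out to taskset to hand back every CPU. The same round trip works entirely within the standard library; a small Linux-only illustration (sched_setaffinity is not available on every platform):

import os

pid = os.getpid()
os.sched_setaffinity(pid, {0})                     # restrict the process to CPU 0
print(os.sched_getaffinity(pid))                   # {0}
os.sched_setaffinity(pid, range(os.cpu_count()))   # re-enable all CPUs, no taskset needed
print(os.sched_getaffinity(pid))                   # e.g. {0, 1, 2, 3}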
Code example #4
import os
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
import parser


def string_to_list(x):
    x = x.replace(' ', '').split(',')
    return [float(y) for y in x]


parser = parser.build_parser()  # rebinds the name: the imported module is shadowed by the parser object
(options, args) = parser.parse_args()  # optparse-style: returns an (options, args) tuple

APP_DIRECTORY = options.app_directory
DATA_DIRECTORY = os.path.join(APP_DIRECTORY, 'data')
MODELS_DIRECTORY = os.path.join(APP_DIRECTORY, 'models')

RANKS = string_to_list(options.ranks)
MAX_ITERS = string_to_list(options.max_iters)
REG_PARAMS = string_to_list(options.reg_params)

spark = SparkSession.builder.getOrCreate()
interactions_train = spark.read \
    .format('csv') \
    .options(header='true', inferSchema='true') \
    .load(os.path.join(DATA_DIRECTORY, 'interactions_train_full.csv'))

interactions_val = spark.read \
    .format('csv') \
    .options(header='true', inferSchema='true') \
    .load(os.path.join(DATA_DIRECTORY, 'interactions_val_full.csv'))  # truncated in the original; filename assumed by analogy with the training set
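RANKS, MAX_ITERS, and REG_PARAMS are parsed into lists, which suggests the script sweeps ALS hyperparameters over a grid. A sketch of what that sweep could look like, picking the model with the lowest validation RMSE; the column names (user_id, item_id, rating) and the selection criterion are assumptions, not taken from the original:

best_rmse, best_model = float('inf'), None
evaluator = RegressionEvaluator(metricName='rmse', labelCol='rating',
                                predictionCol='prediction')
for rank in RANKS:
    for max_iter in MAX_ITERS:
        for reg in REG_PARAMS:
            als = ALS(rank=int(rank), maxIter=int(max_iter), regParam=reg,
                      userCol='user_id', itemCol='item_id', ratingCol='rating',
                      coldStartStrategy='drop')  # drop NaN predictions before scoring
            model = als.fit(interactions_train)
            rmse = evaluator.evaluate(model.transform(interactions_val))
            if rmse < best_rmse:
                best_rmse, best_model = rmse, model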
Code example #5
    tf.logging.info('end create_feed_data')
    tf.logging.info('start predict')
    preds = hyper_qa.predict(data)
    preds = [p[0] for p in preds]
    ids = np.argsort(preds)[::-1]
    tf.logging.info('end predict')
    print(
        # Pass a callable: the original quoted string only built a lambda on
        # each pass without ever calling predict, so nothing was timed.
        f"Running time: {timeit.timeit(lambda: hyper_qa.predict(data), number=1)}"
    )
    # tf.logging.info(idx)
    for idx in ids[:top_n]:
        tf.logging.info(data.pos_raw[idx])


if __name__ == '__main__':
    args = build_parser().parse_args()
    word_to_index, index_to_embedding = load_embedding_from_disks(
        args.glove, with_indexes=True)
    tf.logging.info('Embedding loaded')

    dataset = get_ds(args.dataset_name, args.dataset, word_to_index,
                     index_to_embedding, args.qmax, args.amax, args.char_min,
                     args.num_neg)

    hyper_qa = HyperQA(dataset)
    tf.logging.info('HyperQA created')

    hyper_qa.train()

    # test_predict()
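A note on the timing line fixed above: timeit.timeit accepts either a source string or a callable. A string such as 'lambda: hyper_qa.predict(data)' merely constructs a lambda on each pass and never calls predict, so the measurement is meaningless. Passing the callable itself times the real work:

import timeit

work = lambda: sum(range(1_000_000))
print(timeit.timeit('lambda: work()', number=1))  # ~1e-6 s: only builds a lambda
print(timeit.timeit(work, number=1))              # actually runs the summation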
Code example #6
File: server.py Project: sensei89v/service_fp
    def post(self):
        parser = build_parser(PARSER_COUNTER)
        parser_id = self.server_ctx.storage.add(parser)  # renamed from `id` to avoid shadowing the built-in
        response = json.dumps({"id": parser_id})
        self.set_header('Content-Type', 'application/json')
        self.write(response)
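The post/set_header/write combination suggests a Tornado RequestHandler, with server_ctx injected by the application. A hedged sketch of how such a handler might be wired up; the class name and route are invented, and build_parser, PARSER_COUNTER, and the storage object belong to the surrounding project:

import tornado.web

class ParserHandler(tornado.web.RequestHandler):
    def initialize(self, server_ctx):
        # Tornado passes the per-route kwargs dict to initialize()
        self.server_ctx = server_ctx

def make_app(server_ctx):
    return tornado.web.Application([
        (r'/parsers', ParserHandler, dict(server_ctx=server_ctx)),
    ])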