Code Example #1
def create_graph(sc, cp, delim, wd_languages, rd_languages, ill_languages_from, ill_languages_to):
    G = nx.Graph()

    # add wikidata links
    names = ["id", "language_code", "article_name"]
    wikidata_links = sc.textFile(cp.get('general', 'id2article')).map(get_parser(names))\
                    .filter(lambda x: x['language_code'] in wd_languages and x['id'].startswith('Q'))\
                    .map(lambda x: ('wikidata'+ delim + x['id'], x['language_code'] + delim + x['article_name']))         
    G.add_edges_from(wikidata_links.collect())
    print("Got Wikidata Links")
    # add interlanguage links
    prod_tables = cp.get('general', 'prod_tables')

    names = ['ll_from', 'll_to', 'll_lang']
    for ill_lang in ill_languages_from:
        ill = sc.textFile(os.path.join(prod_tables,  ill_lang + 'wiki_langlinks_joined'))\
        .map(lambda x: x.split('\t'))\
        .filter(lambda x: x[2] in ill_languages_to and len(x[1]) > 0 and len(x[0]) > 0)\
        .map(lambda x: (ill_lang + delim + x[0], x[2] + delim + x[1]))
        G.add_edges_from(ill.collect())
        print("Got ILL links for %s" % ill_lang)

    # add redirect links
    names = ['rd_from', 'rd_to']
    for rd_lang in rd_languages:
        rd = sc.textFile(os.path.join(prod_tables,  rd_lang + "wiki_redirect_joined"))\
        .map(lambda x: x.split('\t'))\
        .map(lambda x: (rd_lang + delim + x[0], rd_lang + delim + x[1]))
        G.add_edges_from(rd.collect())
        print("Got redirect links for %s" % rd_lang)
    return G
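Note that `get_parser(names)` here is a row-parser factory rather than an argparse helper: it returns a function that turns one delimited text line into a dict keyed by `names` (the same pattern appears in Code Example #4 below, where the parsed rows are filtered with `filter(lambda x: len(x) == 8)`). The helper itself is not part of the snippet, so the following is only a minimal sketch under that assumption; the delimiter and the handling of malformed rows are guesses.

# Hypothetical sketch of a get_parser(names) row-parser factory; the real
# implementation is not shown above, so the tab delimiter and the
# "return an empty dict for malformed rows" behaviour are assumptions.
def get_parser(names, delimiter='\t'):
    def parse(line):
        fields = line.split(delimiter)
        if len(fields) != len(names):
            # Short or long rows yield an empty dict so a later
            # filter on len(...) can drop them.
            return {}
        return dict(zip(names, fields))
    return parse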
Code Example #2
    def config_parser_values(self):
        ''' initialize the cfg parser (help text)
            prompt for run parameters
            set cfg values
        '''
        global parms
        cfg.parser = util.get_parser()
        cfg.parms = cfg.parser.parse_args()

        util.set_cfg_parms()
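In Code Example #2 (and in several of the later examples) `util.get_parser()` plays a different role: it builds the command-line parser whose `parse_args()` result provides the run parameters. Its definition is not included in these snippets, so the sketch below is only a guess at a typical argparse-based shape; the option names, defaults, and help text are invented for illustration.

# Hypothetical sketch of an argparse-based util.get_parser(); the real
# helper is not shown in the snippets, so every argument below is an
# assumption made for illustration only.
import argparse

def get_parser():
    parser = argparse.ArgumentParser(description='Run parameters')
    parser.add_argument('--config', help='path to a configuration file')
    parser.add_argument('--manual-seed', type=int, default=-1,
                        help='random seed (negative for nondeterministic runs)')
    return parser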
Code Example #4
def main(args, sc):
    exp_dir = args.dir
    language = args.lang
    cp = SafeConfigParser()
    cp.read(args.config)
    base_dir = os.path.join(cp.get('general', 'local_data_dir'), exp_dir)
    hadoop_base_dir = os.path.join(cp.get('general', 'hadoop_data_dir'), exp_dir)

    names = ["language_code", "user_id", "user", "id", "page_title", "num_edits", "timestamp", "bytes_added"]
    contributions_file = os.path.join(cp.get('general', 'contributions_dir'), language)
    contributions = sc.textFile(contributions_file).map(get_parser(names)).filter(lambda x: len(x) == 8)
    contributions = contributions.map(lambda x: (x['user_id'], x)).groupByKey()
    contributions = contributions.map(to_str)
    save_rdd(contributions, base_dir, hadoop_base_dir, cp.get('eval', 'contributions'))
Code Example #5
def train(config):

    with open(config.glove_word_emb_file, "r") as wm:
        word_mat = np.array(json.load(wm), dtype=np.float32)

    # create train/dev iterator
    parser = get_parser(config)
    train_dataset = get_train_dataset(config.train_record_file, parser, config)
    dev_dataset = get_dev_dataset(config.dev_record_file, parser, config)
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
    train_iterator = train_dataset.make_one_shot_iterator()
    dev_iterator = dev_dataset.make_one_shot_iterator()

    # init model
    model = FlowQA(config=config, iterator=iterator, word_mat=word_mat)

    # init session
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True

    with tf.Session(config=sess_config) as sess:

        writer = tf.summary.FileWriter(config.log_dir, sess.graph)
        sess.run(tf.global_variables_initializer())

        train_handle = sess.run(train_iterator.string_handle())
        dev_handle = sess.run(dev_iterator.string_handle())

        sess.run(tf.assign(model.learning_rate, tf.constant(config.learning_rate, dtype=tf.float32)))
        sess.run(tf.assign(model.is_train, tf.constant(True, dtype=tf.bool)))
        for _ in tqdm(range(config.train_steps)):
            global_step = sess.run(model.global_step) + 1
            loss, _ = sess.run([model.loss, model.train_op], feed_dict={handle: train_handle})
            if global_step % config.save_period == 0:
                loss_sum = tf.Summary(value=[tf.Summary.Value(tag="model/loss", simple_value=loss)])
                writer.add_summary(loss_sum, global_step)
            if global_step % config.dev_period == 0:
                sess.run(tf.assign(model.is_train, tf.constant(False, dtype=tf.bool)))
                dev_losses = []
                for _ in tqdm(range(config.dev_steps)):
                    dev_loss = sess.run(model.loss, feed_dict={handle: dev_handle})
                    dev_losses.append(dev_loss)

                sess.run(tf.assign(model.is_train, tf.constant(True, dtype=tf.bool)))
                dev_loss_sum = tf.Summary(value=[tf.Summary.Value(tag="model/loss", simple_value=np.mean(dev_losses))])
                writer.add_summary(dev_loss_sum, global_step)
                writer.flush()
Code Example #6
def main():
    parser = get_parser()
    #parser.add_argument('infile', help='input i3 file')
    parser.add_argument('infile', help='input json or i3 file')
    args = parser.parse_args()
    
    json_blob_handle = args.infile

    if len(json_blob_handle) == 0:
        raise RuntimeError("need to specify at least one input filename")

    # Read and extract
    if '.i3' in json_blob_handle:
        inputi3 = dataio.I3File(json_blob_handle)
        fpacket = [f for f in inputi3]
    else:
        with open(json_blob_handle) as json_data:
            event = json.load(json_data)
        del json_blob_handle

        fpacket = extract_json_message(event)

    with SourceQueue(args.address, args.queue) as queue:
        s = Source(queue)
        tray = I3Tray()
        tray.AddModule(SendPixelsToScan, "SendPixelsToScan",
                       FramePacket=fpacket,
                       NSide=1,
                       InputTimeName="HESE_VHESelfVetoVertexTime",
                       InputPosName="HESE_VHESelfVetoVertexPos",
                       OutputParticleName="MillipedeSeedParticle",
                       )
        tray.Add(FramePacker, sender=s.send)
        tray.Execute()

    print('done!')
Code Example #7
File: main.dcgan.py  Project: pkumusic/syn-image
import argparse
import os
import random

import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
from torch.autograd import Variable

import util
from model_interface import DCGAN
#  from models.charater_embedder import TextEncoder

from tensorboard_logger import configure, log_value

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Text2Fig Generator')
    parser = util.get_parser(parser)

    opt = parser.parse_args()
    print(opt)

    # tensorboard creation
    configure(opt.tensorboardPath)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
Code Example #8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import json
import os
import numpy as np
import random
from tensorboardX import SummaryWriter
import tqdm

from data_loader import *
from models import model_dict
from util import get_parser

parser = get_parser()
parser = parser.parse_args()

use_cuda = torch.cuda.is_available()
if use_cuda:
    if parser.manual_seed >= 0:
        torch.cuda.manual_seed(parser.manual_seed)
device = torch.device("cuda" if use_cuda else "cpu")


def _generate_shuffled_incides(shuffled_indices, n_servers, n_dim):
    # The function body is truncated in this excerpt; `idxs` is not defined
    # in the lines shown here.
    return idxs


shuffled_indices = list(range(parser.n_servers))
Code Example #9
""" Crops videos into 1024x1024 patches.

CL Args:
  -i Path to directory with input videos.
  -o Path to directory with output videos.
"""

import os
import numpy as np
import skvideo.io
from util import get_parser
import warnings
warnings.filterwarnings('ignore')

args = get_parser().parse_args()

inPath = args.input
outPath = args.output

numVid = 0

for k, vid in enumerate(os.listdir(inPath)):
    print(vid)

    video = skvideo.io.vread(os.path.join(inPath, vid))

    for i in np.arange(200, np.size(video, 1) - 1024 - 200, 200):
        for j in np.arange(200, np.size(video, 2) - 1024 - 200, 150):
            skvideo.io.vwrite(
                os.path.join(outPath, 'Cropped_' + str(numVid + 1) + '.MP4'),
                video[:, i:i + 1024, j:j + 1024, :])
Code Example #10
                    help='Do nothing else but re-run the last deployment.')
parser.add_argument('--colorless',
                    dest='color',
                    action='store_false',
                    default=True,
                    help='Don\'t use any colors.')
parser.set_defaults(dry=False, copy=False)
args = parser.parse_args()

depot = util.expand(args.depot)

#configurations
configurations_file = os.path.join(depot, conf.CONFIGURATIONS_FILE_NAME)
configurations_file_exists = os.path.isfile(configurations_file)
if configurations_file_exists:
    configurations_parser = util.get_parser(configurations_file)
    configurations_parse_succes = configurations_parser is not None


def deploy():
    """
    Deploy SUS entirely
    """

    deploy_configurations()

    if args.last_run_file:
        make_last_run_file()

def deploy_configurations():
    """