def __init__(self, x, y, vocab_size, hidden_size, num_layers, pretrained_embeds=None):
        """
        Implements a neural language model using an LSTM.
        Word y_n+1 ~ Softmax(U * h_n)
        :param x A minibatch: each row is an instance (a sequence),
            with batch_size rows
        :param y x shifted by 1, which are the target words to predict
            for the language modeling objective based on the hidden LSTM
            state
        :param vocab_size The number of types in the training data
        :param hidden_size The dimensionality of the word embeddings and LSTM hidden states
        :param num_layers The number of stacked LSTM layers
        :param pretrained_embeds Pretrained embeddings for initialization, as an ND array
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Initialize the word embedding table.  If we have pretrained embeddings, we use those
        self.word_embedding_lookup = LookupTable(length=vocab_size, dim=hidden_size, name="word_embeddings")
        if pretrained_embeds is None:
            initialize(self.word_embedding_lookup, 0.8)
        else:
            assert pretrained_embeds.shape[0] == vocab_size and pretrained_embeds.shape[1] == hidden_size
            self.word_embedding_lookup.weights_init = Constant(pretrained_embeds)
            self.word_embedding_lookup.biases_init = Constant(0)
            self.word_embedding_lookup.initialize()

        self.word_embeddings = self.word_embedding_lookup.W

        self.y_hat, self.cost, self.cells = self.nn_fprop(x, y, num_layers)
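The Blocks-based examples on this page call a project-level initialize(brick, scale) helper that is not included in the excerpt. A minimal sketch of what such a helper typically does, assuming it simply attaches standard Blocks initialization schemes and then allocates the parameters (the real project may use a Gaussian rather than a uniform distribution):

from blocks.initialization import Constant, Uniform

def initialize(brick, scale):
    # Hypothetical helper: give the brick simple weight/bias initializers,
    # then allocate and initialize its parameters.
    brick.weights_init = Uniform(width=scale)
    brick.biases_init = Constant(0)
    brick.initialize()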
Example #2
def main(args):
  """Run testing."""
  test_data = utils.read_data(args, "test")
  print("total test samples:%s" % test_data.num_examples)

  if args.random_other:
    print("warning, testing mode with 'random_other' will result in "
          "different results every run...")

  model = models.get_model(args, gpuid=args.gpuid)
  tfconfig = tf.ConfigProto(allow_soft_placement=True)
  tfconfig.gpu_options.allow_growth = True
  tfconfig.gpu_options.visible_device_list = "%s" % args.gpuid

  with tf.Session(config=tfconfig) as sess:
    utils.initialize(load=True, load_best=args.load_best,
                     args=args, sess=sess)

    # load the graph and variables
    tester = models.Tester(model, args, sess)

    perf = utils.evaluate(test_data, args, sess, tester)

  print("performance:")
  numbers = []
  for k in sorted(perf.keys()):
    print("%s, %s" % (k, perf[k]))
    numbers.append("%s" % perf[k])
  print(" ".join(sorted(perf.keys())))
  print(" ".join(numbers))
Example #3
    def __init__(self):
        print(bold_string('Hi, 欢迎使用全自动机器人起居注主编~~'))  # "Hi, welcome to the fully automatic QJZ (daily chronicle) editor bot"
        if not os.path.exists(self._INITIALIZE_FILE):
            _, editoraddlist, editordictupper = get_editors_info()
            initialize(editoraddlist, editordictupper)
            with open(self._INITIALIZE_FILE, 'w'):
                pass

        self._date = get_QJZ_date()

        password_file = os.path.join(os.path.dirname(__file__), '.token', 'token')
        password = self._get_decoded_password(password_file)
        if not password:
            password = getpass.getpass("请输入WMWZ的密码(不会显示):")  # prompt: "Enter the WMWZ password (will not be displayed):"
            # Store encoded password in a file, to avoid inputting password every time.
            self._write_encoded_password(password, password_file)

        try:
            self._bdwm = BDWM('WMWZ', password)
        except BDWM.RequestError as e:
            # If failing to login, remove wrong password file.
            os.remove(password_file)
            raise e
        
        self._year, self._month, self._day = \
            self._date[:4], self._date[4:6], self._date[6:]
        self._title = '未名起居注 {}年{}月{}日'.format(
            self._year, self._month, self._day)
        self._origin_title = self._title
        self._txt_file = os.path.join('QJZ@{}'.format(self._date), 
                                      'QJZ@{}.txt'.format(self._date))
        self._seed_file = '{}.txt'.format(self._date)
    def __init__(self, morpho_idxs, masks, word_idxs, morpho_vocab_size,
                 hidden_size, word_embeds):
        """
        Implements a morpheme-level prior by computing the sum of KL-Div
        of the elements of the morpheme embeddings and the word embeddings
        (where these elements are in [0,1] and are taken as Bernoulli dists).
        :param morpho_idxs A 3D tensor of batch_size x seq_length x max_morphemes_per_word
            Where the 3rd dimension is morpheme indices, padded with 0's so all words have
            the same morpheme decomposition length
        :param masks A 4D tensor of bits which select which values in morpho_idxs are
            padding and which are actual morphemes.  4D is needed for broadcasting
        :param word_idxs A 2D matrix of batch_size x seq_length of word indices
        :param morpho_vocab_size the number of morpheme types seen in training data
        :param hidden_size the dimensionality of morpheme / word embeddings
        :param word_embeds the unconstrained word embeddings from the language model
        """
        self.morpho_vocab_size = morpho_vocab_size
        self.hidden_size = hidden_size
        self.word_embed_lookup = word_embeds  # These are the unconstrained word embeddings

        self.morpho_embed_lookup = LookupTable(length=morpho_vocab_size,
                                               dim=hidden_size,
                                               name="morpho_embeddings")
        initialize(self.morpho_embed_lookup, 0.8)

        self.cost = self.compute_cost(morpho_idxs, masks, word_idxs)
        self.cost.name = "morpho_cost"

        self.norm = self.morpho_embed_lookup.W.norm(2)
        self.norm.name = "morpho_embed_norm"
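compute_cost is not shown in this excerpt; the docstring describes summing elementwise KL divergences between morpheme and word embedding values treated as Bernoulli parameters. A sketch of that elementwise term, assuming Theano tensors and a small epsilon for numerical stability (masking and the sum over morphemes happen elsewhere):

import theano.tensor as T

def bernoulli_kl(p, q, eps=1e-6):
    # Elementwise KL(Bernoulli(p) || Bernoulli(q)), clipped so the logs stay finite.
    p = T.clip(p, eps, 1.0 - eps)
    q = T.clip(q, eps, 1.0 - eps)
    return p * T.log(p / q) + (1.0 - p) * T.log((1.0 - p) / (1.0 - q))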
Example #6
    def __init__(self, s_dim, a_dim):
        super(Net, self).__init__()

        self.s_dim = s_dim
        self.a_dim = a_dim

        self.cnn = nn.Sequential(
            nn.Conv2d(in_channels=4,
                      out_channels=32,
                      kernel_size=[8, 8],
                      stride=[4, 4],
                      padding=0),
            nn.ReLU(),
            nn.Conv2d(in_channels=32,
                      out_channels=64,
                      kernel_size=[4, 4],
                      stride=[2, 2],
                      padding=0),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=[3, 3],
                      stride=[1, 1],
                      padding=0),
            nn.ReLU(),
        )

        self.fc = nn.Sequential(nn.Linear(3136, 512), nn.ReLU(),
                                nn.Linear(512, a_dim))

        initialize(self.cnn)
        initialize(self.fc)
    def lstm_layer(self, h, n):
        """
        Performs the LSTM update for a batch of word sequences
        :param h The word embeddings for this update
        :param n The number of layers of the LSTM
        """
        # Map the word embeddings to 4 * hidden_size, one block per LSTM gate
        # (input, forget, cell, output), as the Blocks LSTM brick expects
        linear = Linear(input_dim=self.hidden_size, output_dim=self.hidden_size * 4,
                        name='linear_lstm' + str(n))
        initialize(linear, sqrt(6.0 / (5 * self.hidden_size)))
        lstm = LSTM(dim=self.hidden_size, name='lstm' + str(n))
        initialize(lstm, 0.08)
        return lstm.apply(linear.apply(h))
def main(args):
    """
    Initialize the shelf, possibly sync to s3, then check attendance, close
    the shelf and maybe sync the shelf again.

    Args:
        args (ArgumentParser args): Parsed arguments that impact how the check_attendance runs
    """
    if args.s3_sync:
        download_shelve_from_s3()

    if args.debug:
        logging.basicConfig(stream=sys.stdout,
                            level=logging.DEBUG,
                            format="%(message)s")
    else:
        logging.basicConfig(stream=sys.stdout,
                            level=logging.INFO,
                            format="%(message)s")

    store, sc = initialize(update_everyone=True)
    try:
        check_attendance(store, sc, users=args.users)
    finally:
        store.close()
        if args.s3_sync:
            upload_shelve_to_s3()
Example #9
def predict():
    try:
        # f = request.files['file']  
        image = Image.open('./img.jpg').convert("RGB")
        image = image_loader(image)

        encoder, decoder, vocab = initialize()
        features = encoder(image).unsqueeze(1)
        output = decoder.sample(features)
        sentence = clean_sentence(output, vocab)
        res = {}
        res['pred_1'] = sentence

        outputs = decoder.sample_beam_search(features)
        num_sents = min(len(outputs), 3)
        count = 2
        for output in outputs[:num_sents]:
            sentence = clean_sentence(output, vocab)
            res['pred_{}'.format(count)] = sentence
            count += 1
        # print(res)
        return app.response_class(response=json.dumps(res), status=200, mimetype='application/json')
    except Exception as error:
        err = str(error)
        print(err)
        return app.response_class(response=json.dumps(err), status=500, mimetype='application/json')
Example #10
def main(args):
    """
    Initialize the shelf, possibly sync to s3, then generate a meeting, close
    the shelf and maybe sync the shelf again.

    Args:
        args (ArgumentParser args): Parsed arguments that impact how the generate_meeting runs
    """
    if args.s3_sync:
        download_shelve_from_s3()

    store, sc = initialize(update_everyone=True)
    try:
        max_attempts, attempt = 100, 1
        success = False
        while not success:
            success = create_meetings(
                store,
                sc,
                size=args.size,
                whos_out=args.whos_out,
                pairs=args.pairs,
                force_create=args.force_create,
                any_pair=attempt > max_attempts,
            )
            attempt += 1
    finally:
        store.close()
        if args.s3_sync:
            upload_shelve_to_s3()
def generate_samples_cond(config, n_samples, model_name, y_class):
    n_samples = int(n_samples)

    # Initializing generator from configuration
    G = utils.initialize(config, model_name)

    # Update batch size setting used for G
    G_batch_size = max(config['G_batch_size'], config['batch_size'])
    z_ = utils.prepare_z(G_batch_size,
                         G.dim_z,
                         device='cuda',
                         fp16=config['G_fp16'],
                         z_var=config['z_var'])

    # Preparing fixed y tensor
    y_ = utils.make_y(G_batch_size, y_class)

    # Sample function
    sample = functools.partial(utils.sample_cond, G=G, z_=z_, y_=y_)

    # Sample images in batches; the caller can save the returned arrays to an NPZ
    print('Sampling %d images from class %d...' % (n_samples, y_class))

    x, y = [], []
    for i in trange(int(np.ceil(n_samples / float(G_batch_size)))):
        with torch.no_grad():
            images, labels = sample()
        x += [np.uint8(255 * (images.cpu().numpy() + 1) / 2.)]
        y += [labels.cpu().numpy()]
    x = np.concatenate(x, 0)[:n_samples]
    y = np.concatenate(y, 0)[:n_samples]

    return x, y
Example #12
def initialize(params, step, sL, s, _input):
    # print("initialize")
    #print("Paramset: %s" % params)
    #print("    timestep", s['timestep'])
    #print("    clovers", len(s['s']['clovers']))
    if (s['timestep'] == 0):
        filename = utils.network_filename(params)
        if os.path.exists(filename):
            os.remove(filename)

        s = utils.initialize(params, market_settings, s['s'])
        utils.saveNetwork(s['s']['network'], params)
        s['s']['network'] = None

    s['s']['gasPrice'] = s['s']['gasPrice'] + norm.rvs(loc=0, scale=5)
    if (s['s']['gasPrice'] < 1):
        s['s']['gasPrice'] = 1
    if (s['s']['gasPrice'] > 20):
        s['s']['gasPrice'] = 20

    # reset timestepStats, as these are counters which should be reset
    # at every new timestep
    for key in s['s']['timestepStats'].keys():
        s['s']['timestepStats'][key] = 0
    # print("end-initialize")
    return ('s', s['s'])
Example #14
    def __init__(self,
                 terminal_symb,
                 x,
                 y,
                 size,
                 num_generations=400,
                 crossover_rate=0.7,
                 mutation_rate=0.05,
                 early_stop=0.1,
                 history_len=20):
        self.primitive_symbol = [
            '+', '-', '*', '/', 'sqrt', '^', 'log', 'sin', 'cos', 'tan'
        ]
        self.terminal_symb = terminal_symb
        self.x = x
        self.y = y

        self.size = size
        self.history_len = history_len
        self.old_mutation_rate = mutation_rate
        self.num_generations = num_generations
        self.early_stop = early_stop
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.population = [
            initialize(self.terminal_symb, self.primitive_symbol)
            for i in range(self.size)
        ]
        # Tracks whether each chromosome has been selected for the next generation
        self.status = np.zeros((self.size,), dtype=int)
        self.bestCromossome = None

        self.loss_history = []
        self.duration = None
Example #15
def test_beautiful():
    with open('onani.yml') as f:
        y = yaml.safe_load(f)

    links = utils.initialize(y)[0]
    links.get_elements()
    print('toto')
Example #16
def test_runningmeanstd():
    for (x1, x2, x3) in [
        (np.random.randn(3), np.random.randn(4), np.random.randn(5)),
        (np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
        ]:

        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        U.initialize()

        x = np.concatenate([x1, x2, x3], axis=0)
        ms1 = [x.mean(axis=0), x.std(axis=0)]
        rms.update(x1)
        rms.update(x2)
        rms.update(x3)
        ms2 = U.eval([rms.mean, rms.std])

        assert np.allclose(ms1, ms2)
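The test above checks that incremental updates reproduce the statistics of the concatenated batches. A sketch of the merge rule a RunningMeanStd-style class typically implements (the parallel mean/variance formula); the baselines implementation itself is not shown here:

import numpy as np

def update_mean_var(mean, var, count, batch):
    # Merge the running (mean, var, count) with one batch of rows.
    batch_mean, batch_var, batch_count = batch.mean(axis=0), batch.var(axis=0), batch.shape[0]
    delta = batch_mean - mean
    total = count + batch_count
    new_mean = mean + delta * batch_count / total
    m2 = var * count + batch_var * batch_count + delta ** 2 * count * batch_count / total
    return new_mean, m2 / total, total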
Example #17
def runtime(opt, model, data):
  '''
  Test runtime.
  '''
  batch_size = data['bc'].size(0)
  times = []
  for i in range(batch_size):
    #bc, gt, x = data['bc'][i], data['final'][i], data['x'][i]
    bc = data['bc'][i].unsqueeze(0)
    gt = data['final'][i].unsqueeze(0)
    x = data['x'][i].unsqueeze(0)
    if torch.cuda.is_available():
      bc = bc.cuda()
      gt = gt.cuda()
      x = x.cuda()

    # Initialize with zeros and calculate starting_error
    y = x.clone()
    y = utils.initialize(y, bc, 'zero')
    starting_error = utils.l2_error(y, gt).cpu()

    # Initialize
    x = utils.initialize(x, bc, opt.initialization)
    # Get the errors first
    threshold = 0.01
    errors, _ = utils.calculate_errors(x, bc, None, gt, model.iter_step,
                                       opt.n_evaluation_steps, starting_error,
                                       threshold)
    errors = errors[0].cpu().numpy()
    if np.all(errors >= threshold):
      print('Skip')
      continue
    steps = np.nonzero(errors < threshold)[0][0]
    print('Steps:', steps)

    # Measure time
    start_t = time.time()
    for _ in range(steps):
      y = model.iter_step(x, bc, None).detach()
    end_t = time.time()
    t = end_t - start_t
    print('Time: {}'.format(t))
    times.append(t)

  return times
Example #18
    def execute(self, context):
        try:
            result = initialize()
            if result[0] != SUCCESS:
                self.report(type={result[0]}, message=result[1])
            return {'FINISHED'}
        except Exception:
            return {"CANCELLED"}
Example #19
    def post(self):
        json_obj = json_decode(self.request.body)
        # new dictionary
        response_to_send = initialize(json_obj['type'], json_obj['id'])

        print('Response to return')

        pprint.pprint(response_to_send)

        self.write(json.dumps(response_to_send))
Example #20
def main():
    opt, logger, stats, vis = utils.build(is_train=True, tb_dir='tb_train')
    np.save(os.path.join(opt.ckpt_path, 'opt.npy'), opt)
    data_loader = get_data_loader(opt)
    logger.print('Loading data from {}'.format(opt.dset_path))
    print('####### Data loaded #########')
    # Validation
    val_opt = copy.deepcopy(opt)
    val_opt.is_train = False
    val_opt.data_limit = 20
    val_loader = get_data_loader(val_opt)

    model = HeatModel(opt)

    for epoch in range(opt.start_epoch, opt.n_epochs):
        model.setup(is_train=True)
        for step, data in enumerate(data_loader):
            bc, final, x = data['bc'], data['final'], data['x']
            f = None if 'f' not in data else data['f']
            x = utils.initialize(x, bc, opt.initialization)
            loss_dict = model.train(x, final, bc, f)
            if (step + 1) % opt.log_every == 0:
                print('Epoch {}, step {}'.format(epoch, step))
                vis.add_scalar(loss_dict, epoch * len(data_loader) + step)

        logger.print(
            ['[Summary] Epoch {}/{}:'.format(epoch, opt.n_epochs - 1)])

        # Evaluate
        if opt.evaluate_every > 0 and (epoch + 1) % opt.evaluate_every == 0:
            model.setup(is_train=False)
            # Find eigenvalues
            if opt.iterator != 'cg' and opt.iterator != 'unet':
                w, _ = utils.calculate_eigenvalues(model, image_size=15)
                w = sorted(np.abs(w))
                eigenvalues = {'first': w[-2], 'second': w[-3], 'third': w[-4]}
                vis.add_scalar({'eigenvalues': eigenvalues}, epoch)
                logger.print('Eigenvalues: {:.2f}, {:.3f}, {:.3f}, {:.3f}'\
                              .format(w[-1], w[-2], w[-3], w[-4]))

            # Evaluate entire val set
            results, images = evaluate(opt, model, val_loader, logger)
            vis.add_image({'errors': images['error_curves'][0]}, epoch + 1)
            vis.add_scalar(
                {
                    'steps': {
                        'Jacobi': results['Jacobi'],
                        'model': results['model']
                    },
                    'ratio': results['ratio']
                }, epoch + 1)

        if (epoch + 1) % opt.save_every == 0 or epoch == opt.n_epochs - 1:
            model.save(opt.ckpt_path, epoch + 1)
Example #21
    def __init__(self, s_dim, a_dim):
        super(Net, self).__init__()

        self.s_dim = s_dim
        self.a_dim = a_dim

        self.cnn = nn.Sequential(
            nn.Conv2d(in_channels=4,
                      out_channels=16,
                      kernel_size=[8, 8],
                      stride=[4, 4],
                      padding=0),
            nn.ReLU(),
            nn.Conv2d(in_channels=16,
                      out_channels=32,
                      kernel_size=[4, 4],
                      stride=[2, 2],
                      padding=0),
            nn.ReLU(),
        )
        self.actor = nn.Sequential(nn.Linear(2592, 256), nn.ReLU(),
                                   nn.Linear(256, a_dim), nn.Softmax(dim=-1))
        self.critic = nn.Sequential(
            nn.Linear(2592, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )
        initialize(self.cnn)
        initialize(self.actor)
        initialize(self.critic)
    def softmax_layer(self, h, y):
        """
        Perform Softmax over the hidden state in order to
        predict the next word in the sequence and compute
        the loss.
        :param h The hidden state sequence
        :param y The target words
        """
        hidden_to_output = Linear(name='hidden_to_output', input_dim=self.hidden_size,
                                  output_dim=self.vocab_size)
        initialize(hidden_to_output, sqrt(6.0 / (self.hidden_size + self.vocab_size)))

        linear_output = hidden_to_output.apply(h)
        linear_output.name = 'linear_output'
        softmax = NDimensionalSoftmax(name="lm_softmax")
        y_hat = softmax.log_probabilities(linear_output, extra_ndim=1)
        y_hat.name = 'y_hat'

        cost = softmax.categorical_cross_entropy(y, linear_output, extra_ndim=1).mean()

        cost.name = 'cost'
        return y_hat, cost
Example #23
def run(args):
    if args['--local']:
        args['--base-path'] = '/Users/andrei/Google Drive/_Facultate/MPhil Cambridge/Dissertation/project'
    else:
        args['--base-path'] = '/content/drive/My Drive/_Facultate/MPhil Cambridge/Dissertation/project'

    initialize(args, seed=0)

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    OUT_DIR = path.join(args['--base-path'], 'logs')
    store = Store(OUT_DIR)

    if args['--model-to-train'] == 'mnist':
        model = MnistClassifier()
        optim = torch.optim.Adam(model.parameters(), lr=1e-3)

        train_dl = get_mnist_dl(args, train=True)
        valid_dl = get_mnist_dl(args, train=False)

        train_model(args, model, optim, train_dl, valid_dl, store, device)
Example #24
def receive_message():
	
	if request.method == 'GET':
		token_sent = request.args.get("hub.verify_token")
		return verify_fb_token(token_sent)
	
	else:
		output = request.get_json()
		for event in output['entry']:
			messaging = event['messaging']
			for message in messaging:
				if message.get('message'):
					#Facebook Messenger ID for user so we know where to send response back to
					recipient_id = message['sender']['id']
					message_read(recipient_id)
					typing_on(recipient_id)
					time.sleep(1)
					
					if message['message'].get('text'):	
						time.sleep(1)
						input_text = message['message'].get('text').lower()
						typing_off(recipient_id)

						if input_text == 'yes':
							response_raw = utils.user_says_yes()
							quickreplies.send(recipient_id, response_raw['text'], response_raw['replyOptions'])

						elif input_text == 'no':
							response_raw = utils.user_says_no()
							quickreplies.send(recipient_id, response_raw['text'], response_raw['replyOptions'])

						elif 'hey' == input_text[:3] or 'hello' == input_text[:5] or 'hi' == input_text[:2]:
							response_raw = utils.initialize()
							quickreplies.send(recipient_id, response_raw['text'], response_raw['replyOptions'])

						else:
							response_raw = utils.process_user_input(input_text)
							quickreplies.send(recipient_id, response_raw[0], response_raw[1])
							# send_message(recipient_id, response_text)
					
					if message['message'].get('attachments'):
						# [{'type': 'image', 'payload': {'url': 'https://scontent.xx.fbcdn.net/...'}}
						send_message(recipient_id, "Please wait while we process your image!")
						typing_on(recipient_id)
						img_object = message['message'].get('attachments')[0]
						img_url = img_object['payload']['url']
						response_raw = utils.clarify_image(img_url)
						typing_off(recipient_id)
						quickreplies.send(recipient_id, response_raw[0], response_raw[1])
						# send_message(recipient_id, response_text)
	return "Message Processed"
Example #25
def test_dist():
    np.random.seed(0)
    p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1))
    q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1))

    # p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5))
    # q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8))

    comm = MPI.COMM_WORLD
    assert comm.Get_size()==2
    if comm.Get_rank()==0:
        x1,x2,x3 = p1,p2,p3
    elif comm.Get_rank()==1:
        x1,x2,x3 = q1,q2,q3
    else:
        assert False

    rms = RunningMeanStd(epsilon=0.0, shape=(1,))
    U.initialize()

    rms.update(x1)
    rms.update(x2)
    rms.update(x3)

    bigvec = np.concatenate([p1,p2,p3,q1,q2,q3])

    def checkallclose(x,y):
        print(x,y)
        return np.allclose(x,y)

    assert checkallclose(
        bigvec.mean(axis=0),
        U.eval(rms.mean)
    )
    assert checkallclose(
        bigvec.std(axis=0),
        U.eval(rms.std)
    )
Example #26
def evaluate(opt,
             model,
             data_loader,
             logger,
             error_threshold=0.05,
             limit=None,
             vis=None):
    '''
  Loop through the dataset and calculate evaluation metrics.
  '''
    if model.compare_model is not None:
        logger.print('Comparison: {} ({}), {} ({})'.format(\
                         model.iterator.name(), model.iterator.n_operations,
                         model.compare_model.name(), model.compare_model.n_operations))
    logger.print('Initialization: {}'.format(opt.initialization))
    logger.print('Error threshold: {}'.format(error_threshold))

    metric = utils.Metrics(scale=1, error_threshold=error_threshold)
    images = {'error_curves': [], 'results': []}

    for step, data in enumerate(data_loader):
        bc, gt, x = data['bc'], data['final'], data['x']
        f = None if 'f' not in data else data['f']
        if opt.initialization != 'random':
            # Test time: do not change data if 'random'
            x = utils.initialize(x, bc, opt.initialization)
        results, x = model.evaluate(x, gt, bc, f, opt.n_evaluation_steps)
        # Update metric
        metric.update(results)

        if step % opt.log_every == 0:
            img = utils.plot_error_curves(results, num=4)
            if vis is not None:
                vis.add_image({'errors_avg_init': img}, step)
            images['error_curves'].append(img)
            img = utils.plot_results({'x': x, 'gt': gt})
            if vis is not None:
                vis.add_image({'results': img}, step)
            images['results'].append(img)
        if (step + 1) % opt.log_every == 0:
            print('Step {}'.format(step + 1))
        if limit is not None and (step + 1) == limit:
            break

    # Get results
    results = metric.get_results()
    for key in results:
        logger.print('{}: {}'.format(key, results[key]))
    metric.reset()
    return results, images
Example #27
def main(args):
    store, sc = initialize(update_everyone=True)
    try:
        max_attempts, attempt = 100, 1
        success = False
        while not success:
            success = create_meetings(store, sc, size=args.size,
                                      whos_out=args.whos_out,
                                      pairs=args.pairs,
                                      force_create=args.force_create,
                                      any_pair=attempt > max_attempts)
            attempt += 1
    finally:
        store.close()
Example #28
    def __init__(self,
                 config,
                 model_name,
                 thr=None,
                 multi_gans=None,
                 gan_weights=None):
        # Updating settings
        G_batch_size = config['G_batch_size']
        n_classes = config['n_classes']

        # Loading GAN weights
        if multi_gans is None:
            self.G = utils.initialize(config, model_name)
        else:
            # Assuming that weight files follow the naming convention:
            # model_name_k, where k is in [0, multi_gans-1]
            self.G = [
                utils.initialize(config, model_name + "_%d" % k)
                for k in range(multi_gans)
            ]
        self.multi_gans = multi_gans
        self.gan_weights = gan_weights

        # Preparing sampling functions
        self.z_, self.y_ = utils.prepare_z_y(G_batch_size,
                                             config['dim_z'],
                                             n_classes,
                                             device='cuda',
                                             fp16=config['G_fp16'],
                                             z_var=config['z_var'],
                                             thr=thr)

        # Preparing fixed y tensors
        self.y_fixed = {
            y: utils.make_y(G_batch_size, y)
            for y in range(n_classes)
        }
Example #29
def test_initialize():
    config = Namespace(
        model="squeezenet1_0",
        lr=1e-3,
        momentum=0.9,
        weight_decay=1e-4,
        num_iters_per_epoch=1,
        num_warmup_epochs=1,
        max_epochs=1,
    )
    model, optimizer, loss_fn, lr_scheduler = initialize(config)
    assert isinstance(model, nn.Module)
    assert isinstance(optimizer, optim.Optimizer)
    assert isinstance(loss_fn, nn.Module)
    assert isinstance(lr_scheduler, (_LRScheduler, ParamScheduler))
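A sketch of the kind of factory this test exercises; the choices below (a torchvision model looked up by name, SGD, cross-entropy, and a plain PyTorch scheduler) are assumptions standing in for the project's actual initialize, which presumably also uses the warmup settings in the config:

import torch.nn as nn
import torch.optim as optim
from torchvision import models

def initialize(config):
    # Build the four objects the test asserts on: model, optimizer, loss, scheduler.
    model = getattr(models, config.model)(num_classes=10)
    optimizer = optim.SGD(model.parameters(), lr=config.lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    loss_fn = nn.CrossEntropyLoss()
    total_steps = max(config.num_iters_per_epoch * config.max_epochs, 1)
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps)
    return model, optimizer, loss_fn, lr_scheduler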
Example #30
    def mutation(self, idx):
        t = copy.deepcopy(self.population[idx])
        gene = self.select_node(t)
        while gene is None:
            gene = self.select_node(t)
        parent = gene.up

        mutated_gene = initialize(self.terminal_symb, self.primitive_symbol)
        if parent is None:
            t = mutated_gene
        elif parent.left == gene:
            parent.left = mutated_gene
        else:
            parent.right = mutated_gene
        mutated_gene.up = parent
        return [t]
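Both the population construction in the earlier genetic-programming example and this mutation step call an initialize(terminal_symb, primitive_symbol) helper that is not shown. A hypothetical sketch consistent with the node attributes used here (up, left, right), treating every primitive as binary for simplicity:

import random

class Node:
    # Minimal expression-tree node with the attributes mutation() relies on.
    def __init__(self, symb):
        self.symb = symb
        self.left = None
        self.right = None
        self.up = None

def initialize(terminal_symb, primitive_symbol, depth=3):
    # 'Grow'-style random tree: return a terminal at depth 0 (or by chance),
    # otherwise pick a primitive and recurse into both children.
    if depth == 0 or random.random() < 0.3:
        return Node(random.choice(terminal_symb))
    node = Node(random.choice(primitive_symbol))
    node.left = initialize(terminal_symb, primitive_symbol, depth - 1)
    node.right = initialize(terminal_symb, primitive_symbol, depth - 1)
    node.left.up = node
    node.right.up = node
    return node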
Example #31
def main(args):
    store, sc = initialize(update_everyone=True)
    try:
        max_attempts, attempt = 100, 1
        success = False
        while not success:
            success = create_meetings(store,
                                      sc,
                                      size=args.size,
                                      whos_out=args.whos_out,
                                      pairs=args.pairs,
                                      force_create=args.force_create,
                                      any_pair=attempt > max_attempts)
            attempt += 1
    finally:
        store.close()
Example #32
def test_heat():
    image_size = 65
    scale = np.random.uniform(350, 450) / (image_size**2)
    f = -gaussian(image_size) * scale
    f = torch.Tensor(f).unsqueeze(0)
    f = utils.pad_boundary(f, torch.zeros(1, 4))

    bc = torch.Tensor(np.random.rand(1, 4) * 80)
    x = torch.zeros(1, image_size + 2, image_size + 2)
    x = utils.set_boundary(x, bc)
    x = utils.initialize(x, bc, 'avg')

    y = x.clone()
    for i in range(2000):
        y = utils.fd_step(y, bc, None)

    z = x.clone()
    for i in range(4000):
        z = utils.fd_step(z, bc, f)

    # Au = 0
    A = utils.loss_kernel.view(1, 1, 3, 3)
    r = F.conv2d(y.unsqueeze(1), A).squeeze(1)
    error = torch.abs(r).max().item()
    print(error)

    # Au = f
    A = utils.loss_kernel.view(1, 1, 3, 3)
    r = F.conv2d(z.unsqueeze(1), A).squeeze(1) - f[:, 1:-1, 1:-1]
    error = torch.abs(r).max().item()
    print(error)

    y = (y / 100).numpy().squeeze(0)
    z = (z / 100).numpy().squeeze(0)

    plt.imshow(y)
    plt.colorbar()
    plt.show()

    plt.imshow(z)
    plt.colorbar()
    plt.show()
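utils.fd_step is not shown in this excerpt. A sketch of a single Jacobi relaxation sweep for the 5-point Laplacian that such a step typically performs, assuming unit grid spacing and Dirichlet boundaries held fixed (the project's own sign convention for f may differ):

import torch

def jacobi_step(u, f=None):
    # Replace each interior value with the average of its four neighbours,
    # minus the source term when one is given; boundary rows/columns are kept.
    out = u.clone()
    interior = (u[..., :-2, 1:-1] + u[..., 2:, 1:-1] +
                u[..., 1:-1, :-2] + u[..., 1:-1, 2:]) / 4.0
    if f is not None:
        interior = interior - f[..., 1:-1, 1:-1] / 4.0
    out[..., 1:-1, 1:-1] = interior
    return out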
Example #33
    def run(self):
        """
        Main method of the Word2Vec class.
        :return: the final values of the weights W1, W2 and a history of the value of the loss function vs. epoch
        """
        if len(self.corpus) == 0:
            raise ValueError('You need to specify a corpus of text.')

        corpus_tokenized, V = utl.tokenize(self.corpus)
        W1, W2 = utl.initialize(V, self.N)
        loss_vs_epoch = []
        for e in range(self.n_epochs):
            print(str(datetime.now()))
            print('Current epoch: ', str(e))
            loss = 0.
            for context, center in utl.corpus2io(corpus_tokenized, V, self.window):
                W1, W2, loss = self.method(context, center, W1, W2, loss)
            loss_vs_epoch.append(loss)

        return W1, W2, loss_vs_epoch
Example #34
    def evaluate(self, x, gt, bc, f, n_steps):
        '''
    x, f, gt: size (batch_size x image_size x image_size)
    Run Jacobi and our iterator for n_steps iterations, and calculate errors.
    Return a dictionary of errors: size (batch_size x (n_steps + 1)).
    '''
        bc = bc.cuda()
        x = x.cuda()
        gt = gt.cuda()
        if f is not None:
            f = f.cuda()

        if utils.is_bc_mask(x, bc):
            print('Initializing with zero')
            x = utils.initialize(x, bc, 'zero')
        # Calculate starting error
        starting_error = utils.l2_error(x, gt).cpu()
        results = {}

        if self.iterator.name().startswith('UNet'):
            # Unet, set threshold to be higher
            threshold = 0.01
        else:
            threshold = 0.002

        if self.compare_model is not None:
            # Jacobi
            fd_errors, _ = utils.calculate_errors(x, bc, f, gt,
                                                  self.compare_model.iter_step,
                                                  n_steps, starting_error,
                                                  threshold)
            results['Jacobi errors'] = fd_errors

        # error of model
        errors, x = utils.calculate_errors(x, bc, f, gt, self.iter_step,
                                           n_steps, starting_error, threshold)
        results['model errors'] = errors

        return results, x
Example #35
def main(args):
    """
    Initialize the shelf, possibly sync to s3, then check attendance, close
    the shelf and maybe sync the shelf again.

    Args:
        args (ArgumentParser args): Parsed arguments that impact how the check_attendance runs
    """
    if args.s3_sync:
        download_shelve_from_s3()

    if args.debug:
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(message)s")
    else:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")

    store, sc = initialize(update_everyone=True)
    try:
        check_attendance(store, sc, users=args.users)
    finally:
        store.close()
        if args.s3_sync:
            upload_shelve_to_s3()
    return jsonify(obj)


@app.route('/crossref/<string:ht>', methods=['GET'])
@crossdomain(origin='*')
def get_crossref(ht):
    obj = get_crossref_data_chart(ht)
    return jsonify(obj)

@app.route('/contentdata/<string:ht>', methods=['GET'])
@crossdomain(origin='*')
def get_contentdata(ht):
    contentArray = get_content_data_chart(ht)
    obj = {
        "data": contentArray
    }
    return jsonify(obj)

@app.route('/')
@crossdomain(origin='*')
def get_test():
    return "Testing the Seeding Food Studies Application"


if __name__ == '__main__':
    #handler = RotatingFileHandler('/home/sgqueen/seedfoodstudy/seedfoodstudywebservice/logs/seedfoodstudy.log', maxBytes=10000, backupCount=1)
    #handler.setLevel(logging.WARNING)
    #app.logger.addHandler(handler)
    initialize()
    app.run(host=os.environ['OPENSHIFT_PYTHON_IP'], port=int(os.environ['OPENSHIFT_PYTHON_PORT']), debug=False)
Example #37
def main():
    """
    Initialize the shelf, possibly sync to s3, then loop forever: check
    attendance or generate a meeting when the scheduled time arrives, syncing
    the shelf (and s3) after each update and closing the shelf on exit.
    """
    if S3_BUCKET:
        download_shelve_from_s3()

    tz = timezone(TIMEZONE)
    store, sc = initialize(update_everyone=True)

    try:
        while True:
            # Get current time, and date of our last meeting
            now = datetime.now(tz)
            logging.info("It's now %s,", now.strftime(DATE_FMT))
            last_meeting = store["history"][-1]
            logging.info("and the last meeting was on %s.", last_meeting["date"].strftime(DATE_FMT))

            # Determine if it's time to check attendance
            attendance_time = all(
                [
                    (now.date() - last_meeting["date"]) >= FREQUENCY,
                    now.hour == ATTENDANCE_TIME["hour"],
                    now.minute == ATTENDANCE_TIME["minute"],
                    now.weekday() == ATTENDANCE_TIME["weekday"],
                ]
            )
            logging.info("Is it attendance checking time? %s", attendance_time)

            # Determine if it's time for a new meeting
            meeting_time = all(
                [
                    (now.date() - last_meeting["date"]) >= FREQUENCY,
                    now.hour == MEETING_TIME["hour"],
                    now.minute == MEETING_TIME["minute"],
                    now.weekday() == MEETING_TIME["weekday"],
                ]
            )
            logging.info("Is it meeting generating time? %s", meeting_time)

            sync = False
            if attendance_time:
                logging.info("Gonna check that attendance!")
                update_everyone_from_slack(store, sc)
                check_attendance(store, sc)
                sync = True
            elif meeting_time:
                logging.info("Let's try to generate a meeting!")
                update_everyone_from_slack(store, sc)
                max_attempts, attempt = 100, 1
                success = False
                while not success:
                    success = create_meetings(
                        store, sc, force_create=True, any_pair=attempt > max_attempts
                    )
                    attempt += 1
                sync = True

            if sync:
                logging.info("Syncing to local storage.")
                store.sync()
                if S3_BUCKET:
                    logging.info("Uploading to s3.")
                    upload_shelve_to_s3()

            # Go to sleep for a minute and check again
            logging.info("Going to sleep for a minute.")
            time.sleep(60)
    finally:
        store.close()
Example #38
def main(args):
    store, sc = initialize(update_everyone=True)
    try:
        check_attendance(store, sc, users=args.users, debug=args.debug)
    finally:
        store.close()