Example #1
def write_ppc_file(fname):
    """Save a PYPOWER case file.

    Assumes `import os`, `import numpy as np` and a module-level dict
    `ppc` holding the "bus", "gen" and "branch" arrays.
    """
    base = os.path.basename(fname)
    casename = os.path.splitext(base)[0]

    outfile = open(fname, 'w', newline='')

    outfile.write('from numpy import array\n\n')

    outfile.write('def ' + casename + '():\n')
    outfile.write('\tppc = {"version": \'2\'}\n')
    outfile.write('\tppc["baseMVA"] = 100.0\n')

    # np.array_repr renders each matrix as valid `array([...])` source,
    # so the generated file can be imported back as a Python module.
    outfile.write('\tppc["bus"] = ')
    outfile.write(np.array_repr(ppc["bus"]))
    outfile.write('\n\n')

    outfile.write('\tppc["gen"] = ')
    outfile.write(np.array_repr(ppc["gen"]))
    outfile.write('\n\n')

    outfile.write('\tppc["branch"] = ')
    outfile.write(np.array_repr(ppc["branch"]))
    outfile.write('\n\n')

    outfile.write('\treturn ppc')
    outfile.close()

    return True
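A quick standalone check of the round trip this writer relies on (a minimal sketch, not from the original project): np.array_repr emits valid Python source, so the generated case file can be evaluated or imported back.

import numpy as np
from numpy import array  # needed so the repr can be evaluated back

bus = np.array([[1.0, 3.0], [2.0, 2.0]])
src = np.array_repr(bus)                # "array([[1., 3.],\n       [2., 2.]])"
assert np.array_equal(bus, eval(src))   # the repr round-trips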
Example #2
 def __repr__(self):
   # if numpy.all(self == 0):
   #   # Bin-only output
   #   return "{}(bins={})".format(type(self).__name__, numpy.array_repr(self._bins))
   # else:
   if self.ndim == 1:
     return "{}({}, data={})".format(type(self).__name__,
       numpy.array_repr(self._bins)[len("array("):-1], 
       numpy.array_repr(self)[len(type(self).__name__)+1:-1])
   else:
      return "{}(({}), data={})".format(type(self).__name__,
        ",".join([numpy.array_repr(x)[6:-1] for x in self._bins]),  # 6 == len("array(")
        numpy.array_repr(self)[len(type(self).__name__)+1:-1])
Example #3
def test():
    from sklearn.metrics import mean_squared_error
    import Surrogates.DataExtraction.pcs_parser as pcs_parser
    sp = pcs_parser.read(file("/home/eggenspk/Surrogates/Data_extraction/Experiments2014/hpnnet/smac_2_06_01-dev/nips2011.pcs"))
    # Read data from csv
    header, data = read_csv("/home/eggenspk/Surrogates/Data_extraction/hpnnet_nocv_convex_all/hpnnet_nocv_convex_all_fastrf_results.csv",
                            has_header=True, num_header_rows=3)
    para_header = header[0][:-2]
    type_header = header[1]
    cond_header = header[2]
    #print data.shape
    checkpoint = hash(numpy.array_repr(data))
    assert checkpoint == 246450380584980815

    model = GaussianProcess(sp=sp, encode=False, rng=1, debug=True)
    x_train_data = data[:100, :-2]
    y_train_data = data[:100, -1]
    x_test_data = data[100:, :-2]
    y_test_data = data[100:, -1]

    model.train(x=x_train_data, y=y_train_data, param_names=para_header)

    y = model.predict(x=x_train_data[1, :])
    print "Is: %100.70f, Should: %f" % (y, y_train_data[1])
    assert y[0] == 0.470745153514900149804844886602950282394886016845703125

    print "Predict whole data"
    y_whole = model.predict(x=x_test_data)
    mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
    print "MSE: %100.70f" % mse
    assert mse == 0.006257598609004190459703664828339242376387119293212890625

    print "So far so good"

    # Try the same with encoded features
    model = GaussianProcess(sp=sp, encode=True, rng=1, debug=True)
    #print data[:10, :-2]
    model.train(x=x_train_data, y=y_train_data, param_names=para_header)

    y = model.predict(x=x_train_data[1, :])
    print "Is: %100.70f, Should: %f" % (y, y_train_data[1])
    assert y[0] == 0.464671665294324409689608046392095275223255157470703125

    print "Predict whole data"
    y_whole = model.predict(x=x_test_data)
    mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
    print "MSE: %100.70f" % mse
    assert mse == 0.00919265128042330570412588031103950925171375274658203125

    assert hash(numpy.array_repr(data)) == checkpoint
Example #4
def myf(x):
	header = 'MaxStepX, TorsoWy, TorsoWx, StepHeight, Stiffness, MaxStepTheta, MaxStepY, MaxStepFrequency'
	print header
	print np.array_repr(x[0]).replace('\n', '').replace('\t', '')
	speed = input('Input 1/average_speed = ')
	output = np.array(float(speed))

	with open("data_new.py",'a') as f:
		np.savetxt(f, x, delimiter=",")
	# 	for item in x:
	# 		f.write("%s\n" % str(np.array_repr(item).replace('\n', '').replace('\t', '')))
	with open("readings_new.py",'a') as f:
		f.write("%s\n" % str(output))
	return output
Example #5
def test():
    from sklearn.metrics import mean_squared_error
    import Surrogates.DataExtraction.pcs_parser as pcs_parser
    sp = pcs_parser.read(file("/home/eggenspk/Surrogates/Data_extraction/Experiments2014/hpnnet/smac_2_06_01-dev/nips2011.pcs"))
    # Read data from csv
    header, data = read_csv("/home/eggenspk/Surrogates/Data_extraction/hpnnet_nocv_convex_all/hpnnet_nocv_convex_all_fastrf_results.csv",
                            has_header=True, num_header_rows=3)
    para_header = header[0][:-2]
    type_header = header[1]
    cond_header = header[2]
    #print data.shape
    checkpoint = hash(numpy.array_repr(data))
    assert checkpoint == 246450380584980815

    model = GradientBoosting(sp=sp, encode=False, debug=True)
    x_train_data = data[:1000, :-2]
    y_train_data = data[:1000, -1]
    x_test_data = data[1000:, :-2]
    y_test_data = data[1000:, -1]

    model.train(x=x_train_data, y=y_train_data, param_names=para_header, rng=1)

    y = model.predict(x=x_train_data[1, :])
    print "Is: %100.70f, Should: %f" % (y, y_train_data[1])
    assert y[0] == 0.45366000254662230961599789225147105753421783447265625

    print "Predict whole data"
    y_whole = model.predict(x=x_test_data)
    mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
    print "MSE: %100.70f" % mse
    assert mse == 0.00188246958253847243396073007914992558653466403484344482421875

    print "So far so good"

    # Try the same with encoded features
    model = GradientBoosting(sp=sp, encode=True, debug=True)
    #print data[:10, :-2]
    model.train(x=x_train_data, y=y_train_data, param_names=para_header, rng=1)

    y = model.predict(x=x_train_data[1, :])
    print "Is: %100.70f, Should: %f" % (y, y_train_data[1])
    assert y[0] == 0.460818965082699205648708584703854285180568695068359375

    print "Predict whole data"
    y_whole = model.predict(x=x_test_data)
    mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
    print "MSE: %100.70f" % mse
    assert mse == 0.002064362783199560034963493393433964229188859462738037109375

    assert hash(numpy.array_repr(data)) == checkpoint
Example #6
  def testTransformerFeedForwardLayer(self):
    with self.session(use_gpu=True) as sess:
      tf.set_random_seed(3980847392)
      inputs = tf.random_normal([5, 2, 3], seed=948387483)
      paddings = tf.zeros([5, 2])
      p = layers_with_attention.TransformerFeedForwardLayer.Params()
      p.name = 'transformer_fflayer'
      p.input_dim = 3
      p.hidden_dim = 7
      transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)

      h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_output = [
          [[-0.88366592, -0.05049637,  0.01003706],
           [-0.10550675,  1.68050027,  2.29110384]],
          [[-1.30083609, -0.40521634,  0.1911681 ],
           [ 1.2597878 ,  1.45850968,  1.58734488]],
          [[ 0.10373873, -0.2716777 ,  0.2314173 ],
           [ 0.46293864, -0.06359965,  1.20189023]],
          [[ 0.3673597 , -0.1691664 ,  0.78656065],
           [-1.51081395, -0.70281881, -0.9093715 ]],
          [[-1.04800868, -0.70610946, -0.35321558],
           [-1.92480004,  0.08361804,  0.62713993]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      print(np.array_repr(actual_layer_output))
      self.assertAllClose(actual_layer_output, expected_output)
Example #7
 def __str__(self):
     """Return a printable summary of the compressed_data object content.
     """
     output = 'Data: \n'
     output += array_repr(self.data[:], precision=3, suppress_small=True)
     output += '\n Compression level ' + str(self._compression_level) + '\n'
     return output
Example #8
 def setUp(self):
     self._sp = pcs_parser.read(file(os.path.join(os.path.dirname(os.path.realpath(__file__)), "Testdata/nips2011.pcs")))
     # Read data from csv
     header, self._data = read_csv(os.path.join(os.path.dirname(os.path.realpath(__file__)), "Testdata/hpnnet_nocv_convex_all_fastrf_results.csv"),
                                   has_header=True, num_header_rows=3)
     self._para_header = header[0][:-2]
     self._checkpoint = hash(numpy.array_repr(self._data))
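These tests use hash(numpy.array_repr(data)) as a cheap immutability checkpoint. A minimal sketch of the idiom; note that str hashes are randomized per process in Python 3, so hard-coded checkpoint literals need a pinned PYTHONHASHSEED, while comparing against a hash captured in the same run (as below) is portable. The repr, and hence the hash, is also only stable for arrays small enough not to be summarized with '...'.

import numpy as np

data = np.arange(6.0).reshape(2, 3)
checkpoint = hash(np.array_repr(data))
# ... code under test that must not mutate `data` ...
assert hash(np.array_repr(data)) == checkpoint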
Example #9
 def testSpectrumAugmenterWithFreqWarping(self):
   with self.session(use_gpu=False, graph=tf.Graph()):
     tf.random.set_seed(1234)
     inputs = tf.broadcast_to(
         tf.cast(tf.range(8), dtype=tf.float32), (5, 1, 8))
     inputs = tf.expand_dims(inputs, -1)
     paddings = tf.zeros([3, 2])
     p = spectrum_augmenter.SpectrumAugmenter.Params()
     p.name = 'specAug_layers'
     p.freq_mask_max_bins = 0
     p.time_mask_max_frames = 0
     p.freq_warp_max_bins = 4
     p.time_warp_max_frames = 0
     p.random_seed = 345678
     specaug_layer = p.Instantiate()
     # pyformat: disable
     # pylint: disable=bad-whitespace,bad-continuation
     expected_output = np.array(
         [[[0.0, 4.0, 4.5714283, 5.142857, 5.714286, 6.285714, 6.8571434,
            3.999998]],
          [[0.0, 0.8, 1.6, 2.4, 3.2, 4.0, 5.3333335, 6.6666665]],
          [[0.0, 0.6666667, 1.3333334, 2.0, 3.2, 4.4, 5.6000004, 6.8]],
          [[0.0, 1.3333334, 2.6666667, 4.0, 4.8, 5.6000004, 6.3999996,
            5.5999947]],
          [[0.0, 2.0, 2.857143, 3.7142859, 4.571429, 5.4285717, 6.2857146,
            5.999997]]])
     # pylint: enable=bad-whitespace,bad-continuation
     # pyformat: enable
     h, _ = specaug_layer.FPropDefaultTheta(inputs, paddings)
     actual_layer_output = self.evaluate(tf.squeeze(h, -1))
     print(np.array_repr(actual_layer_output))
     self.assertAllClose(actual_layer_output, expected_output)
Example #10
 def testSpectrumAugmenterWarpMatrixConstructor(self):
   with self.session(use_gpu=False, graph=tf.Graph()):
     inputs = tf.broadcast_to(tf.cast(tf.range(10), dtype=tf.float32), (4, 10))
     origin = tf.cast([2, 4, 4, 5], dtype=tf.float32)
     destination = tf.cast([3, 2, 6, 8], dtype=tf.float32)
     choose_range = tf.cast([4, 8, 8, 10], dtype=tf.float32)
     p = spectrum_augmenter.SpectrumAugmenter.Params()
     p.name = 'specAug_layers'
     specaug_layer = p.Instantiate()
     # pyformat: disable
     # pylint: disable=bad-whitespace,bad-continuation
     expected_output = np.array(
         [[0.0000000, 0.6666667, 1.3333333, 2.0000000, 4.0000000,
           5.0000000, 6.0000000, 7.0000000, 8.0000000, 9.0000000],
          [0.0000000, 2.0000000, 4.0000000, 4.6666667, 5.3333333,
           6.0000000, 6.6666667, 7.3333333, 8.0000000, 9.0000000],
          [0.0000000, 0.6666667, 1.3333333, 2.0000000, 2.6666667,
           3.3333333, 4.0000000, 6.0000000, 8.0000000, 9.0000000],
          [0.0000000, 0.6250000, 1.2500000, 1.8750000, 2.5000000,
           3.1250000, 3.7500000, 4.3750000, 5.0000000, 7.5000000]])
     # pylint: enable=bad-whitespace,bad-continuation
     # pyformat: enable
     warp_matrix = specaug_layer._ConstructWarpMatrix(
         batch_size=4,
         matrix_size=10,
         origin=origin,
         destination=destination,
         choose_range=choose_range,
         dtype=tf.float32)
     outputs = tf.einsum('bij,bj->bi', warp_matrix, inputs)
     actual_layer_output = self.evaluate(outputs)
     print(np.array_repr(actual_layer_output))
     self.assertAllClose(actual_layer_output, expected_output)
Example #11
  def __init__(self,q,x,f,qdesc="q",xdesc="x",fdesc="f"):
    """
    q    ... 1D-array of shape (q.size)
    x    ... 2D-array of shape (q.size,x.size)
    f    ... 2D-array of shape (q.size,x.size)
    *desc... description string for q,x,f
    """
    
    self.f = np.asarray(f,dtype='float');
    self.x = np.asarray(x,dtype='float');
    self.q = np.asarray(q,dtype='float');
    self.fdesc = fdesc;
    self.xdesc = xdesc;
    self.qdesc = qdesc;

    # complete input data (if constant for different q)
    if (len(self.x.shape)==1):
      self.x=np.tile(self.x,(len(self.q),1));
    if (len(self.f.shape)==1):
      self.f=np.tile(self.f,(len(self.q),1));

    # test shape of input data
    if (self.q.shape[0] != self.x.shape[0]) or \
       (self.x.shape    != self.f.shape):
      raise ValueError("Invalid shape of arguments.");

    # test for double parameters
    if (np.unique(self.q).size < self.q.size):
      raise ValueError("Parameters are not unique: \n " + np.array_repr(np.sort(self.q)));
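The duplicate-parameter check in isolation (a minimal sketch): np.array_repr of the sorted values gives a readable payload for the error message.

import numpy as np

q = np.array([0.1, 0.3, 0.1])
if np.unique(q).size < q.size:
    raise ValueError("Parameters are not unique: \n " + np.array_repr(np.sort(q)))
# ValueError: Parameters are not unique:
#  array([0.1, 0.1, 0.3])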
Example #12
    def write(self):
        outdict = {'model'            : self.model,
                   'data_out'         : {'q'       : json.dumps(self.q_vals_out),
                                         'i'       : json.dumps(self.i_vals_out),
                                         'units'   : 'A^-1'},
                   'run'              : {'command' : self.command,
                                         'date'    : str(datetime.date.today()),
                                         'time'    : str(datetime.time())},
                   'parameters_in'    : self.parameters_in}

        if self.dataset:
            outdict['dataset'] = {'q_in' : json.dumps(self.datain.q),
                                  'i_in' : json.dumps(self.datain.i)}
            
        if (self.fitsuccess and (self.command == 'fit')):
            outdict['fit'] = {'chi^2'          : self.chisqr,
                              'cov_x'          : np.array_repr(self.cov_x),
                              'parameters_out' : self.parameters}

        path, filename = os.path.split(self.outpath)
        if filename == '':
            filename = self.outfile
             
        if not os.path.exists(path):
            os.mkdir(path)

        if self.xml:
            self.write_cml(path, filename)

        else:
            f = open(os.path.join(path, filename), 'w')
            json.dump(outdict, f)
            f.close()
Example #13
    def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
        with self.session() as sess:
            tf.set_random_seed(_TF_RANDOM_SEED)
            p = self._testParams()
            p.dtype = dtype
            if fprop_dtype:
                p.fprop_dtype = fprop_dtype
                p.input.dtype = fprop_dtype
            mdl = p.cls(p)
            mdl.FProp(mdl.theta)
            loss = mdl.loss
            logp = mdl.eval_metrics['log_pplx'][0]
            tf.global_variables_initializer().run()
            vals = []
            for _ in range(5):
                vals += [sess.run((loss, logp))]

            print('actual vals = %s' % np.array_repr(np.array(vals)))
            self.assertAllClose(vals, [(189.22296, 10.368382),
                                       (282.57202, 10.369616),
                                       (142.55638, 10.367737),
                                       (139.9939, 10.369918),
                                       (293.08011, 10.374517)],
                                atol=1e-6,
                                rtol=1e-6)
Example #14
 def testSpectrumAugmenterWithFrequencyMask(self):
     with self.session(use_gpu=False, graph=tf.Graph()) as sess:
         tf.compat.v1.set_random_seed(1234)
         inputs = tf.ones([3, 5, 4, 2], dtype=tf.float32)
         paddings = tf.zeros([3, 5])
         p = spectrum_augmenter.SpectrumAugmenter.Params()
         p.name = 'specAug_layers'
         p.freq_mask_max_bins = 6
         p.time_mask_max_frames = 0
         specaug_layer = p.Instantiate()
         expected_output = np.array(
             [[[[1., 1.], [1., 1.], [1., 1.], [1., 1.]],
               [[1., 1.], [1., 1.], [1., 1.], [1., 1.]],
               [[1., 1.], [1., 1.], [1., 1.], [1., 1.]],
               [[1., 1.], [1., 1.], [1., 1.], [1., 1.]],
               [[1., 1.], [1., 1.], [1., 1.], [1., 1.]]],
              [[[1., 1.], [0., 0.], [0., 0.], [0., 0.]],
               [[1., 1.], [0., 0.], [0., 0.], [0., 0.]],
               [[1., 1.], [0., 0.], [0., 0.], [0., 0.]],
               [[1., 1.], [0., 0.], [0., 0.], [0., 0.]],
               [[1., 1.], [0., 0.], [0., 0.], [0., 0.]]],
              [[[0., 0.], [0., 0.], [0., 0.], [1., 1.]],
               [[0., 0.], [0., 0.], [0., 0.], [1., 1.]],
               [[0., 0.], [0., 0.], [0., 0.], [1., 1.]],
               [[0., 0.], [0., 0.], [0., 0.], [1., 1.]],
               [[0., 0.], [0., 0.], [0., 0.], [1., 1.]]]])
         h, _ = specaug_layer.FPropDefaultTheta(inputs, paddings)
         actual_layer_output = sess.run(h)
         print(np.array_repr(actual_layer_output))
         self.assertAllClose(actual_layer_output, expected_output)
Example #15
    def __repr__(self):
        """Representation of an mdf_skeleton class data structure

        Returns:
        ------------
        string of mdf class ordered as below
        master_channel_name
            channel_name   description
            numpy_array    unit
        """
        output = 'file name : ' + self.fileName + '\n'
        for m in self.file_metadata.keys():
            output += m + ' : ' + str(self.file_metadata[m]) + '\n'
        output += '\nchannels listed by data groups:\n'
        for d in self.masterChannelList.keys():
            if d is not None:
                output += d + '\n'
            for c in self.masterChannelList[d]:
                output += '  ' + c + ' : '
                desc = self.getChannelDesc(c)
                if desc is not None:
                    output += str(desc)
                output += '\n    '
                data = self.getChannelData(c)
                if data.dtype.kind != 'V': # not byte, impossible to represent
                    output += array_repr(data, \
                        precision=3, suppress_small=True)
                unit = self.getChannelUnit(c)
                output += ' ' + unit + '\n'
        return output
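A standalone illustration of the precision/suppress_small arguments used above to keep the per-channel dumps compact:

import numpy as np

data = np.array([1.23456789e-12, 3.14159265, 2.71828183])
print(np.array_repr(data, precision=3, suppress_small=True))
# array([0.   , 3.142, 2.718])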
Example #16
  def testDecoderSampleTargetSequences(self):
    p = self._DecoderParams(
        vn_config=py_utils.VariationalNoiseParams(None, False, False),
        num_classes=8)
    p.target_seq_len = 5
    p.random_seed = 1
    config = tf.ConfigProto(
        graph_options=tf.GraphOptions(
            optimizer_options=tf.OptimizerOptions(do_function_inlining=False)))
    with self.session(use_gpu=False, config=config) as sess:
      tf.set_random_seed(8372740)
      np.random.seed(35315)
      dec = p.Instantiate()
      source_sequence_length = 5
      batch_size = 4
      source_encodings = tf.constant(
          np.random.normal(
              size=[source_sequence_length, batch_size, p.source_dim]),
          dtype=tf.float32)
      source_encoding_padding = tf.constant(
          [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0],
           [0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]],
          dtype=tf.float32)
      encoder_outputs = py_utils.NestedMap(
          encoded=source_encodings, padding=source_encoding_padding)
      sampled_sequences = dec.SampleTargetSequences(
          dec.theta, encoder_outputs, random_seed=tf.to_int32(123))
      self.assertAllEqual([batch_size, p.target_seq_len],
                          sampled_sequences.ids.shape)
      tf.global_variables_initializer().run()
      decoder_output = sess.run(sampled_sequences)
      print('ids=%s' % np.array_repr(decoder_output.ids))
      lens = np.sum(1 - decoder_output.paddings, axis=1)
      print('lens=%s' % lens)
      # pyformat: disable
      # pylint: disable=bad-whitespace,bad-continuation
      expected_ids = [[6, 2, 2, 2, 2],
                      [0, 0, 7, 5, 1],
                      [6, 1, 5, 1, 5],
                      [6, 7, 7, 4, 4]]
      # pylint: enable=bad-whitespace,bad-continuation
      # pyformat: enable
      expected_lens = [2, 5, 5, 5]
      self.assertAllEqual(expected_lens, lens)
      self.assertAllEqual(expected_ids, decoder_output.ids)

      # Sample again with the same random seed.
      decoder_output2 = sess.run(
          dec.SampleTargetSequences(
              dec.theta, encoder_outputs, random_seed=tf.to_int32(123)))
      # Get the same output.
      self.assertAllEqual(decoder_output.ids, decoder_output2.ids)
      self.assertAllEqual(decoder_output.paddings, decoder_output2.paddings)

      # Sample again with a different random seed.
      decoder_output3 = sess.run(
          dec.SampleTargetSequences(
              dec.theta, encoder_outputs, random_seed=tf.to_int32(123456)))
      # Get different sequences.
      self.assertNotAllClose(expected_ids, decoder_output3.ids)
Example #17
    def __repr__(self) -> str:

        name_class = type(self).__name__

        repr_point = np.array_repr(self.point)

        return f"{name_class}(point={repr_point}, radius={self.radius})"
Example #18
 def testBeamSearchHelper(self):
   with self.session(use_gpu=False) as sess:
     topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults(
         sess, num_hyps_per_beam=3)
     print(np.array_repr(topk_ids))
     print(np.array_repr(topk_lens))
     print(np.array_repr(topk_scores))
     expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0],
                          [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2],
                          [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]]
     expected_topk_lens = [5, 4, 4, 7, 6, 6]
     expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776],
                             [9.74691486, 8.46679497, 7.14809656]]
     self.assertAllEqual(expected_topk_ids, topk_ids.tolist())
     self.assertAllEqual(expected_topk_lens, topk_lens.tolist())
     self.assertAllClose(expected_topk_scores, topk_scores)
Example #19
    def __init__(self, q, x, f, qdesc="q", xdesc="x", fdesc="f"):
        """
    q    ... 1D-array of shape (q.size)
    x    ... 2D-array of shape (q.size,x.size)
    f    ... 2D-array of shape (q.size,x.size)
    *desc... description string for q,x,f
    """

        self.f = np.asarray(f, dtype='float')
        self.x = np.asarray(x, dtype='float')
        self.q = np.asarray(q, dtype='float')
        self.fdesc = fdesc
        self.xdesc = xdesc
        self.qdesc = qdesc

        # complete input data (if constant for different q)
        if (len(self.x.shape) == 1):
            self.x = np.tile(self.x, (len(self.q), 1))
        if (len(self.f.shape) == 1):
            self.f = np.tile(self.f, (len(self.q), 1))

        # test shape of input data
        if (self.q.shape[0] != self.x.shape[0]) or \
           (self.x.shape    != self.f.shape):
            raise ValueError("Invalid shape of arguments.")

        # test for double parameters
        if (np.unique(self.q).size < self.q.size):
            raise ValueError("Parameters are not unique: \n " +
                             np.array_repr(np.sort(self.q)))
Example #20
def plot(points, step, h, w):
  new_p = []
  for p in points:
    new_p.append({
      "x": p["x"] + (p["vel_x"] * step),
      "y": p["y"] + (p["vel_y"] * step)
    })

  max_x = max(p['x'] for p in new_p)
  max_y = max(p['y'] for p in new_p)
  min_x = min(p["x"] for p in new_p)
  min_y = min(p["y"] for p in new_p)
	
  res = False
  
  # If it fits between the bounds, print it out
  if max_y - min_y <= h and max_x - min_x <= w:
    array = numpy.zeros((h, w))
    res = True
    for p in new_p:
      array[p['y'] - min_y][p['x'] - min_x] = 1

    # Print nicely
    for ar in array:
      print(numpy.array_repr(ar).replace('\n', '').replace('\t', '').replace("      ", "").replace("0.", "  "))

  return res
Example #21
    def _testLayerHelper(self,
                         test_case,
                         sess,
                         p,
                         expected=None,
                         not_expected=None,
                         global_step=-1):
        tf.set_random_seed(398847392)
        np.random.seed(12345)
        p.name = 'proj'
        p.input_dim = 3
        p.output_dim = 4
        p.params_init = py_utils.WeightInit.Gaussian(0.1)
        l = p.cls(p)
        in_padding = tf.constant([[[0], [0], [1], [0]], [[1], [1], [0], [0]]],
                                 dtype=tf.float32)
        inputs = tf.constant(np.random.normal(0.1, 0.5, [2, 4, 3]),
                             dtype=tf.float32)
        output = l.FPropDefaultTheta(inputs, in_padding)
        tf.global_variables_initializer().run()

        if global_step >= 0:
            sess.run(
                tf.assign(py_utils.GetOrCreateGlobalStepVar(), global_step))

        output = output.eval()
        print('QuantizableLayerTest output', test_case, ':\n',
              np.array_repr(output))
        if expected is not None:
            self.assertAllClose(output, expected)
        if not_expected is not None:
            self.assertNotAllClose(output, not_expected)
        return l
Example #22
def FindResultFromList(result, expected_results):
  """Find the given result from a list of expected results.

  Args:
    result: A MassOutput tuple, from running ops.mass().
    expected_results: A list of MassOutput.  The test asserts `result` is equal
      to at least one result from `expected_results`.

  Returns:
    The index of first match found, or None for not found.

  We use this when the specific output from ops.mass() is not stable across
  different platforms. Specifically, the implementation currently uses
  std::shuffle(), which has different implementations in libc++ and
  libstdc++.
  """
  for idx, expected in enumerate(expected_results):
    match = True
    for attr in MassOutput._fields:
      if not np.array_equal(getattr(result, attr), getattr(expected, attr)):
        match = False
        break
    if match:
      return idx

  tf.logging.error('Found unexpected output from op.mass that fails to match'
                   ' any expected result.')
  for attr in MassOutput._fields:
    tf.logging.info('%s = %s', attr, np.array_repr(getattr(result, attr)))
  return None
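A hedged usage sketch; MassOutput here is a stand-in namedtuple, the real one comes from the surrounding test module.

import collections
import numpy as np

MassOutput = collections.namedtuple('MassOutput', ['ids', 'weights'])

result = MassOutput(ids=np.array([1, 2]), weights=np.array([0.5, 0.5]))
expected = [
    MassOutput(ids=np.array([0, 1]), weights=np.array([1.0, 0.0])),
    MassOutput(ids=np.array([1, 2]), weights=np.array([0.5, 0.5])),
]
idx = FindResultFromList(result, expected)
assert idx == 1  # every field of expected[1] matches via np.array_equal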
Example #23
  def testTransformerFeedForwardLayerSpecOutDim(self):
    with self.session(use_gpu=True) as sess:
      tf.set_random_seed(3980847392)
      inputs = tf.random_normal([5, 2, 3], seed=948387483)
      paddings = tf.zeros([5, 2])
      p = layers_with_attention.TransformerFeedForwardLayer.Params()
      p.name = 'transformer_fflayer'
      p.input_dim = 3
      p.output_dim = 5
      p.hidden_dim = 7
      transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)

      h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_output = [
          [[ 1.42697251,  0.79269135, -0.85500956, -0.8122285 , -1.56555367],
           [-1.7876718 ,  0.26025945, -3.18244219,  1.34756351,  0.25739765]],
          [[ 1.27962363,  0.88677615, -1.23556185, -1.06855559, -1.27293301],
           [ 0.89336467,  2.46229172,  0.11302143,  1.19385004, -2.37805009]],
          [[ 2.80146003, -0.66912627,  1.50160134, -2.30645609, -1.18872762],
           [ 1.61967182, -0.51639485,  0.24441491, -1.0871532 , -0.95539457]],
          [[ 2.03333473, -0.78205228,  0.71245927, -1.63276744, -0.91654319],
           [ 1.54542768, -0.30343491,  0.10666496, -1.67965126, -0.15671858]],
          [[ 1.60873222, -1.88402128,  0.79040933, -1.97199082,  0.4778356 ],
           [-0.13516766, -0.42583361, -1.86275542, -1.09650302,  0.83263111]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      print(np.array_repr(actual_layer_output))
      self.assertAllClose(actual_layer_output, expected_output)
Example #24
    def showProjectionDialog(self):
        """Get and set OpenGL ModelView matrix and focus.
        Useful for setting two different instances to the exact same projection"""
        dlg = uic.loadUi("multilineinputdialog.ui")
        dlg.setWindowTitle("Get and set OpenGL ModelView matrix and focus")
        precision = 8  # use default precision
        MV_repr = np.array_repr(self.MV, precision=precision)
        focus_repr = np.array_repr(self.focus, precision=precision)
        txt = "self.MV = \\\n" "%s\n\n" "self.focus = %s" % (MV_repr, focus_repr)
        dlg.plainTextEdit.insertPlainText(txt)
        dlg.plainTextEdit.selectAll()
        if dlg.exec_():  # returns 1 if OK, 0 if Cancel
            txt = str(dlg.plainTextEdit.toPlainText())
            from numpy import array, float32  # required for exec()

            exec(txt)  # update self.MV and self.focus, with hopefully no maliciousness
Example #25
    def testBiEncoderForwardPassWithDropout(self):
        with self.session(use_gpu=False):
            tf.set_random_seed(8372749040)
            p = self._BiEncoderParams()
            p.dropout_prob = 0.5
            mt_enc = encoder.MTEncoderBiRNN(p)
            batch = py_utils.NestedMap()
            batch.ids = tf.transpose(tf.reshape(tf.range(0, 8, 1), [4, 2]))
            batch.paddings = tf.zeros([2, 4])
            enc_out = mt_enc.FPropDefaultTheta(batch).encoded

            tf.global_variables_initializer().run()
            actual_enc_out = enc_out.eval()
            print('bi_enc_actual_enc_out_with_dropout',
                  np.array_repr(actual_enc_out))
            # pylint: disable=bad-whitespace,bad-continuation
            # pyformat: disable
            expected_enc_out = [[[-2.25614094e-05, 1.19781353e-05],
                                 [-2.74532852e-07, 8.17993077e-06]],
                                [[2.66865045e-05, 1.02941645e-04],
                                 [1.51371260e-05, 3.78371587e-05]],
                                [[3.50117516e-05, 7.65562072e-06],
                                 [-1.30227636e-05, 3.01171349e-06]],
                                [[2.27566215e-06, 1.42354111e-07],
                                 [1.04521234e-06, 2.50320113e-06]]]
            # pyformat: enable
            # pylint: enable=bad-whitespace,bad-continuation
            self.assertAllClose(expected_enc_out, actual_enc_out)
Example #26
  def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
    with self.session() as sess:
      tf.set_random_seed(_TF_RANDOM_SEED)
      p = self._testParams()
      p.dtype = dtype
      if fprop_dtype:
        p.fprop_dtype = fprop_dtype
        p.input.dtype = fprop_dtype
      mdl = p.Instantiate()
      input_batch = mdl.GetInputBatch()
      mdl.FProp(mdl.theta, input_batch)
      loss = mdl.loss
      logp = mdl.eval_metrics['log_pplx'][0]
      tf.global_variables_initializer().run()
      vals = []
      for _ in range(5):
        vals += [sess.run((loss, logp))]

      print('actual vals = %s' % np.array_repr(np.array(vals)))
      self.assertAllClose(vals, [
          [233.337143, 10.370541],
          [235.853119, 10.367168],
          [217.87796, 10.375141],
          [217.822205, 10.372487],
          [159.483185, 10.37289],
      ])
Example #27
  def testEvolvedTransformerDecoderBranchedConvsLayer(self):
    layer = layers_with_attention.EvolvedTransformerDecoderBranchedConvsLayer
    with self.session(use_gpu=True) as sess:
      tf.set_random_seed(3980847392)
      inputs = tf.random_normal([5, 2, 3], seed=948387483)
      paddings = tf.zeros([5, 2])
      p = layer.Params()
      p.name = 'et_decoder_branched_convs'
      p.input_dim = 3
      et_branched_convs = layer(p)

      h = et_branched_convs.FPropDefaultTheta(inputs, paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_output = [
          [[-0.31987068, -0.65715098,  0.90350437],
           [ 0.00773269,  1.07779562,  4.11094666]],
          [[-0.84862059, -0.93186408,  1.16371167],
           [ 1.31467259,  0.03560367,  2.36822462]],
          [[ 0.02183507, -0.0799394 , -1.68870354],
           [ 0.77921551,  1.30145741, -0.86353606]],
          [[ 0.31672907,  0.50000876, -0.93973017],
           [-0.54707348,  0.19211179, -1.45307386]],
          [[-0.46405494,  0.65833056, -1.09345317],
           [-1.17221224, -0.08027397,  0.84021652]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      print(np.array_repr(actual_layer_output))
      self.assertAllClose(actual_layer_output, expected_output)
Example #28
    def __repr__(self):
        '''
        this does not print parent. mode is printed as ha.modes['name']
        '''

        return "Star(HylaaSettings({}, {}), {}, {}, {}, None, ha.modes['{}'], extra_init=({}, {}, {}))".format(
            self.settings.step,
            self.settings.step * self.settings.num_steps,
            array_repr(self.center),
            array_repr(self.basis_matrix),
            self.constraint_list,
            self.mode.name,
            array_repr(self.start_basis_matrix) if self.start_basis_matrix is not None else None,
            self.total_steps,
            self.fast_forward_steps
            )
Example #29
  def testEvolvedTransformerEncoderLayerFProp(self):
    with self.session(use_gpu=True) as sess:
      np.random.seed(6348575)
      depth = 4
      p = GPipeEvolvedTransformerEncoderLayer.Params()
      p.name = 'gpipe_evolved_transformer_encoder'
      p.source_dim = depth
      p.transformer_tpl.tr_fflayer_tpl.hidden_dim = 7
      p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
      transformer = GPipeEvolvedTransformerEncoderLayer(p)

      (source_vecs, source_padding, _, _) = self._testInputs(depth=depth)

      h = transformer.FPropDefaultTheta(source_vecs, source_padding, None)[0]

      tf.global_variables_initializer().run()
      actual_layer_output = sess.run([h])[0]
      tf.logging.info(np.array_repr(actual_layer_output))
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_layer_output = [
          [[-2.03854632, -1.07184005, -0.28417355,  0.17936069],
           [-0.74067241, -1.48318326,  0.26369774,  0.62173623]],
          [[-2.12831736, -0.86353737, -0.54453588,  0.13070297],
           [-0.76326936, -0.04828247, -0.49510449,  1.20852029]],
          [[ 0.85539216, -1.21577334, -1.28910851, -0.15619087],
           [-1.45574117, -1.11208296,  0.71455258,  0.91494167]],
          [[-1.21304905, -1.37239563,  0.7022025 ,  0.16537377],
           [ 3.07106829,  1.35782909, -0.9944036 , -2.28987551]],
          [[-0.13129801, -1.70681071, -0.42324018,  1.32114363],
           [-1.53065133,  0.18422687, -0.93387115,  1.37142754]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      self.assertAllClose(expected_layer_output, actual_layer_output)
Example #30
    def store_img(self,
                  img,
                  label,
                  poly,
                  keypoints=None,
                  processed_img=None,
                  rel_path=None):
        '''
            img: the image
            poly: polygon points
            label: name
            keypoints: optional keypoints
            rel_path: path ending with name (no extension), used as the prefix
                for the pickle and processed files; derived from a hash of the
                image when omitted
            processed_img: the processed image. Will be saved for reference
        '''
        if rel_path is None:
            rel_path = hashlib.sha256(np.array_repr(img).encode()).hexdigest()
        path = os.path.join(self.template_dir, rel_path)
        if len(path.split('.jpg')) > 1:
            path = path.split('.jpg')[0]
        img = ImageDB.Image(path, img, label, poly, keypoints)
        self.images.append(img)

        if processed_img is not None:
            cv2.imwrite(path + '_processes.jpg', processed_img)

        # store pickle
        with open(path + '.pickle', 'wb') as f:
            pickle.dump(img, f)
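The content-derived default for rel_path, sketched standalone. One caveat (an assumption worth checking against the original project): under default print options np.array_repr summarizes large arrays with '...', so distinct large images can hash alike unless the print threshold is raised first.

import hashlib
import numpy as np

img = np.zeros((8, 8), dtype=np.uint8)
rel_path = hashlib.sha256(np.array_repr(img).encode()).hexdigest()
print(rel_path[:12])  # stable, filesystem-safe name derived from pixel content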
Example #31
 def testhomos(self):
     """Are the HOMO indices equal to 34 and 33 (one more alpha electron
     than beta electron)?
     """
     msg = "%s != array([34, 33], 'i')" % numpy.array_repr(self.data.homos)
     numpy.testing.assert_array_equal(self.data.homos,
                                      numpy.array([34, 33], "i"), msg)
Example #32
def repr_ndarray(x, _helper):
    # noinspection PyPackageRequirements
    import numpy as np

    dims = len(x.shape)
    if (
            # Too many dimensions to be concise
            dims > 6 or
            # There's a bug with array_repr and matrices
            isinstance(x, np.matrix)
            and np.lib.NumpyVersion(np.__version__) < '1.14.0' or
            # and with masked arrays...
            isinstance(x, np.ma.MaskedArray)):
        name = type_name(x)
        if name == 'ndarray':
            name = 'array'
        return '%s(%r, shape=%r)' % (name, x.dtype, x.shape)

    edgeitems = repr_ndarray.maxparts // 2
    if dims == 3:
        edgeitems = min(edgeitems, 2)
    elif dims > 3:
        edgeitems = 1

    opts = np.get_printoptions()
    try:
        np.set_printoptions(threshold=repr_ndarray.maxparts,
                            edgeitems=edgeitems)
        return np.array_repr(x)
    finally:
        np.set_printoptions(**opts)
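A hedged usage sketch: the maxparts attribute and the type_name helper belong to the surrounding library, so setting maxparts by hand here is an assumption for demonstration only.

import numpy as np

repr_ndarray.maxparts = 10    # the function reads this attribute for truncation
x = np.arange(10000).reshape(100, 100)
print(repr_ndarray(x, None))  # edge-item-truncated repr; print options restored afterwards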
Example #33
  def testEvolvedTransformerEncoderBranchedConvsLayer(self):
    layer = layers_with_attention.EvolvedTransformerEncoderBranchedConvsLayer
    with self.session(use_gpu=True) as sess:
      tf.set_random_seed(3980847392)
      inputs = tf.random_normal([5, 2, 3], seed=948387483)
      paddings = tf.zeros([5, 2])
      p = layer.Params()
      p.name = 'et_encoder_branched_convs'
      p.input_dim = 3
      et_branched_convs = layer(p)

      h = et_branched_convs.FPropDefaultTheta(inputs, paddings)
      tf.global_variables_initializer().run()
      actual_layer_output = sess.run(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_output = [
          [[-0.13232423, -0.46060669,  0.72598207],
           [ 0.6725747 ,  1.58664441,  2.64087844]],
          [[-0.21702465, -0.68267912,  1.20886588],
           [ 1.69793618,  0.53306532,  1.02958691]],
          [[-0.46037287, -0.42950529, -1.68443251],
           [ 0.21459752,  0.42246291, -0.01271994]],
          [[-0.23293658,  0.15300342, -0.83518255],
           [-0.48914853, -0.44239512, -0.2328119 ]],
          [[-0.57934833,  0.24165238, -1.05392623],
           [-0.8292231 ,  0.06175411,  1.28672981]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      print(np.array_repr(actual_layer_output))
      self.assertAllClose(actual_layer_output, expected_output)
Example #34
  def testForwardPassWithStackingAfterMiddleLayer(self):
    with self.session(use_gpu=False):
      vn_config = py_utils.VariationalNoiseParams(None, False, False)
      p = self._EncoderParams(vn_config)
      p.stacking_layer_tpl.left_context = 1
      p.stacking_layer_tpl.right_context = 0
      p.stacking_layer_tpl.stride = 2
      p.layer_index_before_stacking = 0
      enc_out = self._ForwardPass(p).encoded
      enc_out_sum = tf.reduce_sum(enc_out, 0)

      tf.global_variables_initializer().run()

      # pyformat: disable
      # pylint: disable=bad-whitespace
      expected_enc_out = [
          [0.00102275, -0.02697385, 0.01709868, -0.00939053, -0.01576837,
           0.0070826, -0.00626193, 0.01143604, -0.01742513, -0.00529445,
           0.00284249, -0.01362027, -0.00490865, 0.0216262, -0.01344598,
           -0.00460993, -0.01329017, 0.01379208, -0.00850593, 0.0193335,
           0.01134925, -0.00131254, 0.00375953, -0.00588882, 0.01347932,
           -0.00252493, 0.01274828, 0.01027388, 0.02657663, 0.02644286,
           0.0286899, -0.00833998],
          [-0.01801126, 0.0115137, 0.01355767, 0.00113954, 0.00986663,
           -0.0128988, 0.00794239, -0.00524312, 0.00246279, -0.00575782,
           -0.00213567, -0.01528412, 0.00186096, 0.00253562, -0.00411006,
           -0.00390748, -0.01001569, -0.00344393, -0.01211706, 0.00387725,
           0.02194905, 0.02578988, -0.00255773, 0.00690117, 0.00976908,
           0.01935913, 0.01131854, 0.0013859, -0.01567556, 0.01858256,
           0.02251371, -0.0185001]]
      # pylint: enable=bad-whitespace
      # pyformat: enable
      enc_out_sum_val = enc_out_sum.eval()
      print('enc_out_sum_val', np.array_repr(enc_out_sum_val))
      self.assertAllClose(expected_enc_out, enc_out_sum_val)
Example #35
    def testTransformerAttentionLayerCase2(self):
        with self.session(use_gpu=True) as sess:
            depth = 4
            p = layers_with_attention.TransformerAttentionLayer.Params()
            p.name = 'transformer_atten'
            p.source_dim = depth
            p.is_masked = True
            p.num_attention_heads = 2
            transformer_atten = layers_with_attention.TransformerAttentionLayer(
                p)

            (source_vecs, source_padding, _,
             _) = self._testTransformerAttentionLayerInputs(depth=depth)
            ctx, probs = transformer_atten.FPropDefaultTheta(
                source_vecs, source_padding)
            tf.global_variables_initializer().run()
            actual_ctx, actual_probs = sess.run([ctx, probs])
            tf.logging.info(np.array_repr(actual_ctx))
            tf.logging.info(np.array_repr(actual_probs))
            # pylint: disable=bad-whitespace
            # pyformat: disable
            expected_ctx = [
                [[-0.14429152, 1.15510106, 1.11930299, -1.19245839],
                 [-0.69580591, -0.47006619, 0.82592297, 0.69593251]],
                [[0.24164687, 0.53328454, -1.02119482, -1.49412084],
                 [-0.82601064, 0.024203, -1.11880171, 1.80784416]],
                [[1.7644347, -0.53346401, -1.1461122, -1.42797422],
                 [-0.95326459, 0.39580142, 0.39262164, 0.67513674]],
                [[-0.28252155, -0.95237327, 2.08757687, -0.21231559],
                 [1.4362365, 0.46009994, -1.45436597, -1.90602148]],
                [[-0.51681399, -0.70075679, -0.48352116, 1.93754733],
                 [-1.44486678, 0.81801879, -1.03079689, 1.86697066]]
            ]
            expected_probs = [[[1., 0., 0., 0., 0.], [0.2, 0.2, 0.2, 0.2,
                                                      0.2]],
                              [[0.3966811, 0.60331887, 0., 0., 0.],
                               [0., 1., 0., 0., 0.]],
                              [[0.41050252, 0.58949745, 0., 0., 0.],
                               [0., 0.5245893, 0.4754107, 0., 0.]],
                              [[0.58882225, 0.41117775, 0., 0., 0.],
                               [0., 0.31849149, 0.28174096, 0.39976761, 0.]],
                              [[0.16272782, 0.15781289, 0., 0., 0.67945927],
                               [0., 0.55003977, 0.26049581, 0.18946445, 0.]]]
            # pyformat: enable
            # pylint: enable=bad-whitespace
            self.assertAllClose(expected_ctx, actual_ctx)
            self.assertAllClose(expected_probs, actual_probs)
Example #36
  def testTransformerAttentionLayerCase3(self):
    with self.session(use_gpu=True) as sess:
      depth = 4
      p = layers_with_attention.TransformerAttentionLayer.Params()
      p.name = 'transformer_atten'
      p.source_dim = depth
      p.is_masked = False
      p.num_attention_heads = 2
      transformer_atten = layers_with_attention.TransformerAttentionLayer(p)

      (query_vec, _, aux_vecs,
       aux_paddings) = self._testTransformerAttentionLayerInputs(depth=depth)

      ctx, probs = transformer_atten.FPropDefaultTheta(query_vec, aux_paddings,
                                                       aux_vecs)
      tf.global_variables_initializer().run()
      actual_ctx, actual_probs = sess.run([ctx, probs])
      tf.logging.info(np.array_repr(actual_ctx))
      tf.logging.info(np.array_repr(actual_probs))
      # pylint: disable=bad-whitespace
      # pyformat: disable
      expected_ctx = [
          [[-1.42420077,  1.19024372,  1.35146523,  0.85896158],
           [-0.44974625, -1.00108492,  1.63387251,  1.678146  ]],
          [[ 0.1134335 ,  1.97617495, -0.35918081,  0.26396495],
           [-0.19688171, -0.71197301,  0.0659425 ,  2.5417304 ]],
          [[ 1.58169425,  0.81259179, -0.58948535,  0.20254248],
           [-0.84438968, -0.65845209,  1.45584249,  1.87587976]],
          [[-1.01532316, -0.05166581,  2.07901478,  0.97540361],
           [ 2.08563352,  0.34328598, -0.23240227, -0.19035631]],
          [[-0.53881919, -0.60117185,  0.29170275,  2.6474514 ],
           [-0.88318163,  0.37149727, -0.16098523,  2.3810885 ]]]
      expected_probs = [
          [[ 0.32392544,  0.,  0.27218491,  0.,  0.19574419,  0.,  0.20814547],
           [ 0.,  0.273045  ,  0.,  0.43572819,  0.,  0.2912268 ,  0.]],
          [[ 0.24094662,  0.,  0.23919827,  0.,  0.26563686,  0.,  0.25421822],
           [ 0.,  0.21680018,  0.,  0.33962148,  0.,0.44357836  ,  0.]],
          [[ 0.20083594,  0.,  0.20683075,  0.,  0.28931937,  0.,  0.30301392],
           [ 0.,  0.24710922,  0.,  0.453915  ,  0.,0.29897571  ,  0.]],
          [[ 0.32845193,  0.,  0.26491433,  0.,  0.18304622,  0.,  0.22358747],
           [ 0.,  0.39426237,  0.,  0.19774443,  0.,0.4079932   ,  0.]],
          [[ 0.23542665,  0.,  0.27910906,  0.,  0.30036426,  0.,  0.18510005],
           [ 0.,  0.20147586,  0.,  0.37759233,  0.,  0.42093182,  0.]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
      self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
      self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
Example #37
    def _testBeamSearchOpHelper(self,
                                b_size,
                                num_beams,
                                seq_len,
                                init_best_score,
                                probs,
                                init_atten_probs,
                                atten_probs,
                                best_scores_expected,
                                cum_scores_expected,
                                scores_expected,
                                hyps_expected,
                                prev_hyps_expected,
                                atten_probs_expected,
                                force_eos_in_last_step=False):

        (best_scores, cumulative_scores, scores, hyps, prev_hyps, done_hyps,
         atten_probs, done, scores, atten_probs) = self._runBeamSearchOpHelper(
             b_size,
             num_beams,
             seq_len,
             init_best_score,
             probs,
             init_atten_probs,
             atten_probs,
             force_eos_in_last_step=force_eos_in_last_step)

        tf.logging.info(np.array_repr(best_scores))
        tf.logging.info(np.array_repr(cumulative_scores))
        tf.logging.info(np.array_repr(scores))
        tf.logging.info(np.array_repr(hyps))
        tf.logging.info(np.array_repr(prev_hyps))
        tf.logging.info(np.array_repr(done_hyps))
        tf.logging.info(np.array_repr(atten_probs))
        tf.logging.info(np.array_repr(done))
        tf.logging.info(np.array_repr(scores))
        tf.logging.info(np.array_repr(atten_probs))

        self.assertAllClose(best_scores_expected, best_scores)
        self.assertAllClose(cum_scores_expected, cumulative_scores)
        self.assertAllClose(scores_expected, scores)
        self.assertAllClose(hyps_expected, hyps)
        self.assertAllClose(prev_hyps_expected, prev_hyps)
        self.assertAllClose(atten_probs_expected, atten_probs)
        self.assertEqual(False, done)

        return done_hyps
Example #38
 def show(self, count):
     hist = self.collect_percent(count)
     hist_str = np.array_repr(hist,
                              max_line_width=1024,
                              precision=2,
                              suppress_small=True)
     print("Histogram for vertex {}\n{}".format(
         self.vert_id, hist_str))
Example #39
 def execute(self, v):
     print("\n\nMonitorLayer %s at execution %d:" % (self.tag, self.i))
     o = np.get_printoptions()
     np.set_printoptions(threshold=np.nan)
     print(np.array_repr(v))
     np.set_printoptions(**o)
     self.i += 1
     return v
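On NumPy >= 1.14, threshold=np.nan is rejected; a modern equivalent of the same save/restore pattern (a sketch, not from the original project) uses sys.maxsize:

import sys
import numpy as np

def dump_full(v):
    opts = np.get_printoptions()                 # save current print options
    np.set_printoptions(threshold=sys.maxsize)   # never summarize with '...'
    try:
        print(np.array_repr(v))
    finally:
        np.set_printoptions(**opts)              # always restore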
Example #40
 def showProjectionDialog(self):
     """Get and set OpenGL ModelView matrix and focus.
     Useful for setting two different instances to the exact same projection"""
     dlg = uic.loadUi('multilineinputdialog.ui')
     dlg.setWindowTitle('Get and set OpenGL ModelView matrix and focus')
     precision = 8 # use default precision
     MV_repr = np.array_repr(self.MV, precision=precision)
     focus_repr = np.array_repr(self.focus, precision=precision)
     txt = ("self.MV = \\\n"
            "%s\n\n"
            "self.focus = %s" % (MV_repr, focus_repr))
     dlg.plainTextEdit.insertPlainText(txt)
     dlg.plainTextEdit.selectAll()
     if dlg.exec_(): # returns 1 if OK, 0 if Cancel
         txt = str(dlg.plainTextEdit.toPlainText())
         from numpy import array, float32 # required for exec()
         exec(txt) # update self.MV and self.focus, with hopefully no maliciousness
Example #41
    def test_train(self):
        model = Surrogates.RegressionModels.RidgeRegression.RidgeRegression(sp=self._sp, encode=False, rng=1, debug=True)
        x_train_data = self._data[:1000, :-2]
        y_train_data = self._data[:1000, -1]
        x_test_data = self._data[1000:, :-2]
        y_test_data = self._data[1000:, -1]

        self.assertEqual(hash(numpy.array_repr(x_train_data)), -4233919799601849470)
        self.assertEqual(hash(numpy.array_repr(y_train_data)), -5203961977442829493)

        model.train(x=x_train_data, y=y_train_data, param_names=self._para_header)

        lower, upper = model._scale_info
        should_be_lower = [None, -29.6210089736, 0.201346561323, 0, -20.6929600285, 0, 0, 0, 4.60517018599, 0,
                           2.77258872224, 0, 0, 0.502038871605, -17.2269829469]
        should_be_upper = [None, -7.33342451433, 1.99996215592, 1, -6.92778489957, 2, 1, 1, 9.20883924585, 1,
                           6.9314718056, 3, 1, 0.998243871085, 4.72337617503]

        for idx in range(x_train_data.shape[1]):
            self.assertEqual(lower[idx], should_be_lower[idx])
            self.assertEqual(upper[idx], should_be_upper[idx])

        y = model.predict(x=x_train_data[1, :])
        print "Is: %100.70f, Should: %f" % (y, y_train_data[1])
        self.assertAlmostEqual(y[0], 0.337919078549359763741222195676527917385101318359375, msg=y[0])

        print "Predict whole data"
        y_whole = model.predict(x=x_test_data)
        mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
        print "MSE: %100.70f" % mse
        self.assertAlmostEqual(mse, 0.009198484147153261625273756862)

        # Try the same with encoded features
        model = Surrogates.RegressionModels.RidgeRegression.RidgeRegression(sp=self._sp, encode=True, rng=1, debug=True)
        #print data[:10, :-2]
        model.train(x=x_train_data, y=y_train_data, param_names=self._para_header, rng=1)

        y = model.predict(x=self._data[1, :-2])
        print "Is: %100.70f, Should: %f" % (y, self._data[1, -2])
        self.assertAlmostEqual(y[0], 0.337619548171, msg="%f" % y[0])

        print "Predict whole data"
        y_whole = model.predict(x=x_test_data)
        mse = mean_squared_error(y_true=y_test_data, y_pred=y_whole)
        print "MSE: %100.70f" % mse
        self.assertAlmostEqual(mse, 0.0092026737874672301)
Example #42
 def __str__(self):
     """Return a printable summary of the compressed_data object content.
     """
     output = 'Data: \n'
     output += array_repr(self.data[:],
                          precision=3,
                          suppress_small=True)
     output += '\n Compression level ' + str(self._compression_level) + '\n'
     return output
Example #43
    def _repr_footer(self):
        levheader = "Levels (%d): " % len(self.levels)
        # TODO: should max_line_width respect a setting?
        levstring = np.array_repr(self.levels, max_line_width=60)
        indent = " " * (levstring.find("[") + len(levheader) + 1)
        lines = levstring.split("\n")
        levstring = "\n".join([lines[0]] + [indent + x.lstrip() for x in lines[1:]])

        namestr = "Name: %s, " % self.name if self.name is not None else ""
        return u("%s\n%sLength: %d" % (levheader + levstring, namestr, len(self)))
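The indentation arithmetic above, standalone (a minimal sketch): continuation lines of the wrapped repr are re-indented to align under the opening bracket once the header is prepended.

import numpy as np

levels = np.arange(20)
levheader = "Levels (%d): " % len(levels)
levstring = np.array_repr(levels, max_line_width=60)

indent = " " * (levstring.find("[") + len(levheader) + 1)
lines = levstring.split("\n")
print(levheader + "\n".join([lines[0]] + [indent + x.lstrip() for x in lines[1:]]))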
Example #44
    def __repr__(self):
        temp = "Categorical: %s\n%s\n%s"
        values = np.asarray(self)
        levheader = "Levels (%d): " % len(self.levels)
        levstring = np.array_repr(self.levels, max_line_width=60)

        indent = " " * (levstring.find("[") + len(levheader) + 1)
        lines = levstring.split("\n")
        levstring = "\n".join([lines[0]] + [indent + x.lstrip() for x in lines[1:]])

        return temp % ("" if self.name is None else self.name, repr(values), levheader + levstring)
Example #45
def main():
    file1 = open(str(sys.argv[1]),'r')
    file2 = open(str(sys.argv[2]),'r')
    cell_1 = UnitCell(file1)
    cell_2 = UnitCell(file2)
    disp = displacements(cell_1,cell_2,conv='C')
    disp = disp.flatten()
    if len(sys.argv) > 3:
        in_name = str(sys.argv[3])
    else:
        in_name = "phonons.out"
    if len(sys.argv) > 4:
        mass_name = str(sys.argv[4])
    else:
        mass_name = "apos.dat"
    freqs,normal_modes = read_normal_modes(in_name)
    mass_vec = read_masses(mass_name)
    normal_disp = calc_atomic_displacements(normal_modes,mass_vec)
    weights = decompose_displacements(normal_disp,disp)
    print np.array_repr(weights,precision=8,suppress_small=True)
    print np.linalg.norm(weights)
Example #46
 def __repr__(self):
     output = 'file name : ' + self.fileName + '\n'
     for m in self.file_metadata.keys():
         output += m + ' : ' + str(self.file_metadata[m]) + '\n'
     output += '\nchannels listed by data groups:\n'
     for d in self.masterChannelList.keys():
         if d is not None:
             output += d + '\n'
         for c in self.masterChannelList[d]:
             output += '  ' + c + ' : ' + str(self[c]['description']) + '\n'
             output += '    ' + array_repr(self[c]['data'], precision=3, suppress_small=True) \
                 + ' ' + self[c]['unit'] + '\n'
     return output
Example #47
    def __repr__(self):
        temp = 'Categorical: %s\n%s\n%s'
        values = np.asarray(self)
        levheader = 'Levels (%d): ' % len(self.levels)
        levstring = np.array_repr(self.levels,
                                  max_line_width=60)

        indent = ' ' * (levstring.find('[') + len(levheader) + 1)
        lines = levstring.split('\n')
        levstring = '\n'.join([lines[0]] + [indent + x.lstrip() for x in lines[1:]])

        return temp % ('' if self.name is None else self.name,
                       repr(values), levheader + levstring)
Example #48
def area_precision_recall(detector_response, actual):
    if type(actual) is str:
        actual_img = cv2.imread(actual, flags=-1)
    else:
        actual_img = actual
    assert actual_img.shape == detector_response.shape, \
        "Actual shape: {0} Got: {1}".format(actual_img.shape, detector_response.shape)
    assert all_binary(detector_response), np.array_repr(np.unique(detector_response))
    assert all_binary(actual_img), actual

    pr = area_precision(detector_response, actual_img)
    rec = area_recall(detector_response, actual_img)
    return pr, rec
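The np.array_repr(np.unique(...)) idiom above makes a failed assertion show exactly which pixel values broke the binary-mask requirement. A minimal sketch; all_binary here is a stand-in for the project's own helper.

import numpy as np

def all_binary(img):
    return np.isin(np.unique(img), (0, 1, 255)).all()  # stand-in definition

mask = np.array([[0, 255], [7, 0]])
assert all_binary(mask), np.array_repr(np.unique(mask))
# AssertionError: array([  0,   7, 255])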
Example #49
def compare_response_to_truth(detector_response, actual, cascade=False, thresh=10):
    if type(actual) is str:
        actual_img = cv2.imread(actual, flags=-1)
    else:
        actual_img = actual
    assert actual_img.shape == detector_response.shape, \
        "Actual shape: {0} Got: {1}".format(actual_img.shape, detector_response.shape)
    assert all_binary(detector_response), np.array_repr(np.unique(detector_response))
    assert all_binary(actual_img), actual

    tp = true_positive(detector_response, actual_img, cascade, thresh)
    fp = false_postive(detector_response, actual_img, cascade, thresh)
    fn = false_negative(detector_response, actual_img, cascade, thresh)
    return tp, fp, fn
Example #50
    def save_colormap(self, event):
        import textwrap

        template = textwrap.dedent('''
        from matplotlib.colors import LinearSegmentedColormap
        from numpy import nan, inf

        # Used to reconstruct the colormap in pycam02ucs.cm.viscm
        parameters = {{'xp': {xp},
                      'yp': {yp},
                      'min_Jp': {min_Jp},
                      'max_Jp': {max_Jp}}}

        cm_data = {array_list}

        test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)


        if __name__ == "__main__":
            import matplotlib.pyplot as plt
            import numpy as np

            try:
                from pycam02ucs.cm.viscm import viscm
                viscm(test_cm)
            except ImportError:
                print("pycam02ucs not found, falling back on simple display")
                plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                           cmap=test_cm)
            plt.show()
        ''')

        rgb, _ = self.cmap_model.get_sRGB(num=256)
        with open('/tmp/new_cm.py', 'w') as f:
            array_list = np.array_repr(rgb, max_line_width=78)
            array_list = array_list.replace('array(', '')[:-1]

            xp, yp = self.cmap_model.bezier_model.get_control_points()

            data = dict(array_list=array_list,
                        xp=xp,
                        yp=yp,
                        min_Jp=self.cmap_model.min_Jp,
                        max_Jp=self.cmap_model.max_Jp)

            f.write(template.format(**data))

            print("*" * 50)
            print("Saved colormap to /tmp/new_cm.py")
            print("*" * 50)
Example #51
    def __str__(self):
        """representation a mdf_skeleton class data strucutre

        Returns:
        ------------
        string of mdf class ordered as below
        master_channel_name
            channel_name   description
            numpy_array    unit
        """
        if self.fileName is not None:
            output = 'file name : ' + self.fileName + '\n'
        else:
            output = ''
        for m in self.file_metadata.keys():
            if self.file_metadata[m] is not None:
                output += m + ' : ' + str(self.file_metadata[m]) + '\n'
        if not self._pandasframe:
            output += '\nchannels listed by data groups:\n'
            for d in self.masterChannelList.keys():
                if d is not None:
                    output += d + '\n'
                for c in self.masterChannelList[d]:
                    output += '  ' + c + ' : '
                    desc = self.getChannelDesc(c)
                    if desc is not None:
                        try:
                            output += str(desc)
                        except Exception:
                            pass
                    output += '\n    '
                    data = self.getChannelData(c)
                    # not byte, impossible to represent
                    if data.dtype.kind != 'V':
                        output += array_repr(data[:],
                                             precision=3, suppress_small=True)
                    unit = self.getChannelUnit(c)
                    if unit is not None:
                        output += ' ' + unit + '\n'
            return output
        else:
            set_option('max_rows', 3)
            set_option('expand_frame_repr', True)
            set_option('max_colwidth', 6)
            for master in self.masterGroups:
                output += master
                output += str(self[master])
            return output
Example #52
    def format_molecule_for_numpy(self, npobj=True):
        """Returns a NumPy array of the non-dummy atoms of the geometry
        in Cartesian coordinates in Angstroms with element encoded as
        atomic number. If *npobj* is False, returns representation of
        NumPy array.

        """
        import numpy as np
        factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms
        self.update_geometry()

        # TODO fn title is format_mol... but return args not compatible
        geo = []
        for i in range(self.natom()):
            [x, y, z] = self.atoms[i].compute()
            geo.append([self.Z(i), x * factor, y * factor, z * factor])

        nparr = np.array(geo)
        return nparr if npobj else np.array_repr(nparr)
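A sketch of what the two return modes look like, using a hypothetical water geometry (atomic number, then x, y, z in Angstroms per row) rather than a real molecule object:

import numpy as np

geo = np.array([[8.0, 0.000,  0.000,  0.117],   # O
                [1.0, 0.000,  0.757, -0.467],   # H
                [1.0, 0.000, -0.757, -0.467]])  # H
print(geo)                 # what npobj=True returns (the array itself)
print(np.array_repr(geo))  # what npobj=False returns (its string representation)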
Example #53
def count_posneg(img_file):
    if type(img_file) is str:
        image = cv2.imread(img_file, flags=-1)
    else:
        image = img_file
    dims = image.shape
    if len(dims) == 2:
        h, w = dims
    elif len(dims) == 3:
        h, w, d = dims
    else:
        raise Exception("Incorrect image shape")
    pixels = h * w
    positives = np.sum(np.sum(image))
    negatives = pixels - positives

    assert np.array_equal(np.unique(image), np.array([0, 1])) or \
        np.array_equal(np.unique(image), np.array([0])), np.array_repr(np.unique(image))
    return positives, negatives
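A quick usage sketch: passing an ndarray directly skips the cv2.imread branch, so plain NumPy suffices, and the mask must contain only 0s and 1s for the final assert to hold:

import numpy as np

mask = np.array([[0, 1, 1],
                 [0, 0, 1]], dtype=np.uint8)
pos, neg = count_posneg(mask)
print(pos, neg)  # 3 3 -- three foreground pixels out of six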
Example #54
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """Returns the string representation of an array.

    Args:
        arr (array_like): Input array. It should be able to feed to
            :func:`cupy.asnumpy`.
        max_line_width (int): The maximum length of each output line; longer
            representations are wrapped onto multiple lines.
        precision (int): Floating point precision. It uses the current printing
            precision of NumPy.
        suppress_small (bool): If True, very small numbers are printed as
            zeros.

    Returns:
        str: The string representation of ``arr``.

    .. seealso:: :func:`numpy.array_repr`

    """
    return numpy.array_repr(cupy.asnumpy(arr), max_line_width, precision,
                            suppress_small)
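A minimal usage sketch for the wrapper above, assuming CuPy is installed; the keyword arguments are forwarded unchanged to numpy.array_repr after the device array is copied to the host:

import cupy

x = cupy.array([1.0, 2.5e-12, 3.25])
print(array_repr(x, precision=3, suppress_small=True))
# the tiny second element is suppressed to zero instead of printing in scientific notation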
Example #55
def get_vec_str(vec):
    assert vec.ndim==1
    # use numpy for printing (suppresses small values when others are large)
    tmp = np.array_repr(vec, precision=5, suppress_small=True)
    assert tmp.startswith('array([')
    tmp = tmp[7:]
    assert tmp.endswith('])')
    tmp = tmp[:-2]
    tmp = tmp.strip()
    tmps = [t.strip() for t in tmp.split(',')]

    # convert -0 to 0
    tmps2 = []
    for t in tmps:
        if t=='-0.':
            tmps2.append('0.')
        else:
            tmps2.append(t)
    tmps = tmps2

    tmps = ['% 8s'%(t,) for t in tmps ]
    result = ', '.join( tmps )
    return result
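A short demonstration of the helper above (np is assumed to be the usual numpy import): the tiny third entry is suppressed to zero, the signed zero is rewritten by the '-0.' branch, and each field is right-aligned to eight characters:

import numpy as np

vec = np.array([1.5, -0.0, 3.0e-9, -2.25])
print(get_vec_str(vec))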
Example #56
    def __str__(self):
        text = "Cost as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_Cost[k]) + " " + self.statsName[k] + "\n"
        text += "\nRFD as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_RFD[k]) + " " + self.statsName[k] + "\n"
        text += "\nEPSILON as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_EPSILON[k]) + " " + self.statsName[k] + "\n"
        text += "\nIGD as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_IGD[k]) + " " + self.statsName[k] + "\n"
        text += "\nFt as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_Ft[k]) + " " + self.statsName[k] + "\n"
        text += "\nRFDc as + \\  = \\  -\n"
        for k in range(0, 21):
            text += np.array_repr(self.Table_RFDc[k]) + " " + self.statsName[k] + "\n"
        return text
Example #57
    def writeStats(self):
        text = "# Stats Cost + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_Cost[k]) + " " + self.statsName[k] + "\n"
        text += "\n# Stats RFD + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_RFD[k]) + " " + self.statsName[k] + "\n"
        text += "\n# Stats EPSILON + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_EPSILON[k]) + " " + self.statsName[k] + "\n"
        text += "\n# Stats IGD + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_IGD[k]) + " " + self.statsName[k] + "\n"
        text += "\n# Stats Ft + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_Ft[k]) + " " + self.statsName[k] + "\n"
        text += "\n# Stats RFDc + / = / -\n"
        for k in range(0, 21):
            text += np.array_repr(self.exp_RFDc[k]) + " " + self.statsName[k] + "\n"

        with open("stats.data", "w") as f:
            f.write(text)
Example #58
def cosSim(word1, word2, par):
    """
	Args:
		contexts1: contexts of word to be compared with contexts of another word
		contexts2: contexts of word to be compared with contexts of another word

	Returns:
		cosine distributional probability of two words
		also outputs contexts arrays to serialized.txt
	"""

    contexts1 = getContext(word1, CORPUS, WINDOW_SIZE)
    contexts2 = getContext(word2, CORPUS, WINDOW_SIZE)

    keys1 = np.unique(np.array((contexts1, contexts2)).T[0])
    keys2 = np.unique(np.array((contexts1, contexts2)).T[1])

    # all_keys=None

    # for key in range(0, len(keys2)):
    # 	if keys2[key] not in all_keys:
    # 		np.append(all_keys, keys2)

    all_keys = np.sort(np.unique(np.append(keys1, keys2)))

    # print all_keys
    if par == "ppmi":
        array1 = np.array([[i, ppmi(getContext(i, CORPUS, WINDOW_SIZE), contexts1, i, word1)] for i in all_keys])
        array2 = np.array([[i, ppmi(getContext(i, CORPUS, WINDOW_SIZE), contexts2, i, word2)] for i in all_keys])
    elif par == "freq":
        array1 = np.array([[i, contexts1.get(i, 0)] for i in all_keys])
        array2 = np.array([[i, contexts2.get(i, 0)] for i in all_keys])
    elif par == "bin":
        array1 = np.array([[i, contexts1.get(i, 0)] for i in all_keys])
        for i in range(0, len(array1)):
            if array1[i][1].astype(int) > 1:
                array1[i][1] = 1
        array2 = np.array([[i, contexts2.get(i, 0)] for i in all_keys])
        for i in range(0, len(array2)):
            if array2[i][1].astype(int) > 1:
                array2[i][1] = 1
    else:
        return "ERROR"

        # print contexts1, "\n", contexts2
        # print array1, "\n", array2

    array1_i = np.array([i[1] for i in array1], dtype=float)
    array2_i = np.array([i[1] for i in array2], dtype=float)

    # print array1_i, "\n", array2_i

    out = (np.dot(array1_i, array2_i)) / (np.linalg.norm(array1_i) * np.linalg.norm(array2_i))

    file1 = open("serialized.txt", "w+")

    file1.write("Word 1 Frequencies:\n" + np.array_repr(array1) + "\nVector:\n" + np.array_repr(array1_i))
    file1.write("\n\nWord 2 Frequencies:\n" + np.array_repr(array2) + "\nVector:\n" + np.array_repr(array2_i))
    file1.write("\n\n" + np.array_repr(out))

    file1.close()

    return out
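Stripped of the corpus plumbing, the similarity itself is the final dot-product line; isolated with two hypothetical count vectors:

import numpy as np

array1_i = np.array([2.0, 0.0, 1.0])  # hypothetical counts for word 1
array2_i = np.array([1.0, 1.0, 1.0])  # hypothetical counts for word 2
out = np.dot(array1_i, array2_i) / (np.linalg.norm(array1_i) * np.linalg.norm(array2_i))
print(out)  # ~0.7746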
Example #59
    def testhomos(self):
        """Is the index of the HOMO equal to 34?"""
        numpy.testing.assert_array_equal(self.data.homos, numpy.array([34], "i"), "%s != array([34],'i')" % numpy.array_repr(self.data.homos))
Example #60
            aggregate_confusions[i][r[-2]][cl] += 1
        for i in map(lambda x:x/10., range(12)):
            cl = r[-4] >= i
            aggregate_score_confusions[i][r[-2]][cl] += 1
    if opts.full:
        csvfull = csv.writer(flouts)
        csvfull.writerow(['trueprobs','sum']+header + ['average_score','uid','link','office_level'])
        for file_counter in range(num_full):
            print len(zip(*full_predictions[file_counter]))
            fulldf_avgs = map(lambda a:sum(a)/len(a),zip(*full_df[file_counter]))
            print len(fulldf_avgs)
            full_predictions[file_counter].append(fulldf_avgs)
            full_predictions[file_counter].append(data_full[file_counter].link)
            full_predictions[file_counter].append(data_full[file_counter].data)
            print len(zip(*full_predictions[file_counter]))
            try:
                for r in zip(*full_predictions[file_counter]):
                    cps = class_prob(conditional_probabilities, class_probabilities, r[:-3],fits, r[-3])
                    csvfull.writerow(['{0:1.5f}'.format(cps[1]),sum(map(lambda x: -1 if x==2 else x,r[:-3]))]+map(str,r[:-3]) + [r[-3],eval(r[-1])['uid'],r[-2],eval(r[-1])['office_level']])
            except Exception as error:
                import pdb;pdb.set_trace()
for k,v in aggregate_confusions.iteritems():
    print k
    print np.array_repr(v,precision=0,suppress_small=True)
keys = aggregate_score_confusions.keys()
keys.sort()
for k in keys:
    v = aggregate_score_confusions[k]
    print k
    print np.array_repr(v,precision=0,suppress_small=True)