Example #1
def jump(arr: list, value, step: int = None) -> int:
    if step is None: step = int(len(arr)**0.5)
    max_jumps = len(arr) // step - 1
    for i in range(max_jumps):
        if arr[i * step] <= value < arr[(i + 1) * step]:
            return linear(arr[i * step:(i + 1) * step], value) + i * step
    return linear(arr[max_jumps * step:], value) + max_jumps * step
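The example assumes a linear helper that scans a block sequentially. A minimal sketch of such a helper, plus a small usage check; the helper body and the sample data are illustrative assumptions, not part of the example's project:

def linear(arr: list, value) -> int:
    """Sequential scan: return the index of value in arr, or -1 if absent."""
    for i, item in enumerate(arr):
        if item == value:
            return i
    return -1

# Usage: jump search narrows to one block, then delegates to linear().
data = [1, 3, 4, 7, 9, 12, 15, 18, 21, 25, 30, 41, 52, 63, 74, 85]
assert jump(data, 21) == 8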
Example #2
 def __call__(self, inputs, state, scope=None):
   """Gated recurrent unit (GRU) with nunits cells."""
   with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
     with tf.variable_scope("Gates"):  # Reset gate and update gate.
       # We start with a bias of 1.0 so we do not reset and do not update.
       r, u = tf.split(1, 2, linear.linear([inputs, state], 2 * self._num_units, True, 1.0))
       r, u = tf.sigmoid(r), tf.sigmoid(u)
     with tf.variable_scope("Candidate"):
       c = tf.tanh(linear.linear([inputs, r * state], self._num_units, True))
     new_h = u * state + (1 - u) * c
   return new_h, new_h
Example #3
 def __call__(self, inputs, state, scope=None):
     """Gated recurrent unit (GRU) with nunits cells."""
     with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
         with tf.variable_scope("Gates"):  # Reset gate and update gate.
             # We start with a bias of 1.0 so we do not reset and do not update.
             r, u = tf.split(
                 1, 2,
                 linear.linear([inputs, state], 2 * self._num_units, True,
                               1.0))
             r, u = tf.sigmoid(r), tf.sigmoid(u)
         with tf.variable_scope("Candidate"):
             c = tf.tanh(
                 linear.linear([inputs, r * state], self._num_units, True))
         new_h = u * state + (1 - u) * c
     return new_h, new_h
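For reference, the update rule both GRU snippets implement, written as a minimal NumPy sketch; the weight matrices and shapes are illustrative assumptions, not the variables created by linear.linear:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, W_r, W_u, W_c):
    # Gates start near 1.0 (the bias above), so the cell initially
    # neither resets nor updates.
    xh = np.concatenate([x, h])
    r = sigmoid(xh @ W_r + 1.0)                    # reset gate
    u = sigmoid(xh @ W_u + 1.0)                    # update gate
    c = np.tanh(np.concatenate([x, r * h]) @ W_c)  # candidate state
    return u * h + (1.0 - u) * c                   # new hidden state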
Example #4
def layer(xhat, r, onsager, features, linear_name, denoiser_name, information):
    denoiser_fun = getattr(denoiser(information), denoiser_name)
    linear_fun = getattr(linear(information), linear_name)
    z, linear_helper = linear_fun(xhat, r, features)
    features['onsager'] = onsager
    new_xhat, denoiser_helper, den_output = denoiser_fun(
        z, xhat, r, features, linear_helper)
    new_r = features['y'] - batch_matvec_mul(features['H'], new_xhat)
    new_onsager = denoiser_helper['onsager']

    W = linear_helper['W']

    I = tf.eye(information['params']['K'] * 2,
               batch_shape=[information['batchsize_placeholder']])
    e10 = batch_matvec_mul(I - tf.matmul(W, features['H']),
                           information['x'] - xhat)

    e11 = batch_matvec_mul(
        W, features['y'] - batch_matvec_mul(features['H'], information['x']))
    helper = {
        'linear': linear_helper,
        'denoiser': denoiser_helper,
        'stat': {
            'e0': xhat - information['x'],
            'e1': z - information['x'],
            'e2': new_xhat - information['x'],
            'e10': e10,
            'e11': e11
        }
    }

    return new_xhat, new_r, new_onsager, helper, den_output
Example #5
 def __call__(self, inputs, state, scope=None):
   """Run the cell and output projection on inputs, starting from state."""
   output, res_state = self._cell(inputs, state)
   # Default scope: "OutputProjectionWrapper"
   with tf.variable_scope(scope or type(self).__name__):
     projected = linear.linear(output, self._output_size, True)
   return projected, res_state
Example #6
def solve(path):
    eq = ocr.ocr(path)
    if (detect.isArith(eq)):
        if (detect.isArithEq(eq)):
            return (f"\t\t{arith.arithEq(eq)}\n")
        else:
            if (arith.arithIneq(eq)):
                return ("\t\tTrue\n")
            else:
                return ("\t\tFalse\n")
    else:
        nterm, var = alg.checknovar(eq)
        if (nterm == 1):
            degree = alg.checkdegree(eq, var[0])
            if (degree == 1):
                return (linear(eq, var[0]))
            elif (degree == 2):
                return (quadratic(eq, var[0]))
            elif (degree == 3):
                return ("Cubic equation\n")
            else:
                return ("Can solve up to 3rd degree polynomial only!!\n")
        elif (nterm == 2):
            return (simult(eq, var, nterm))
        elif (nterm == 3):
            print("Simultaneous equation three variables\n")
        else:
            print("Can solve equations upto 3 vaiables only!!!\n")
 def __call__(self, inputs, state, scope=None):
     """Run the cell and output projection on inputs, starting from state."""
     output, res_state = self._cell(inputs, state)
     # Default scope: "OutputProjectionWrapper"
     with tf.variable_scope(scope or type(self).__name__):
         projected = linear.linear(output, self._output_size, True)
     return projected, res_state
Example #8
def main():
    """
    Ensure your application creates an empty database if one doesn’t exist
    when the app is first run. Call it customers.db.
    """

    print("Start linear ingest from CSV files")

    ret_list_linear = linear()

    print("Start parallel ingest from CSV files")

    ret_list_parallel = parallel()

    print("CSV ingest completed")

    result = show_rentals('P000003')
    print(result)

    print("Linear ingest statistics:")
    for docstats in ret_list_linear:
        print(f"{docstats[0]} doc: num  records: {docstats[3]}")
        print(f"{docstats[0]} doc: time elapsed: {docstats[4]}")

    print("Parallel ingest statistics:")
    for docstats in ret_list_parallel:
        print(f"{docstats[0]} doc: num  records: {docstats[3]}")
        print(f"{docstats[0]} doc: time elapsed: {docstats[4]}")
Example #9
def _all_answers():
    """
    The leading underscore on this fixture suppresses an
    invalid-redefinition warning from pytest-pylint.

    """

    answers_linear = l.linear()
    answers_parallel = p.parallel()

    return ({
        "processed": answers_linear[0][0],
        "count_prior": answers_linear[0][1],
        "count_new": answers_linear[0][2],
        "elapsed": answers_linear[0][3]
    }, {
        "processed": answers_linear[1][0],
        "count_prior": answers_linear[1][1],
        "count_new": answers_linear[1][2],
        "elapsed": answers_linear[1][3]
    }, {
        "processed": answers_parallel[0][0],
        "count_prior": answers_parallel[0][1],
        "count_new": answers_parallel[0][2],
        "elapsed": answers_parallel[0][3]
    }, {
        "processed": answers_parallel[1][0],
        "count_prior": answers_parallel[1][1],
        "count_new": answers_parallel[1][2],
        "elapsed": answers_parallel[1][3]
    })
Example #10
    def __init__(self,
                 input_size,
                 output_size,
                 maxpart=2,
                 config=maxout_config()):
        scope = config.scope
        k = maxpart

        transform = linear(input_size, output_size * k, config)

        def forward(inputs):
            z = transform(inputs)
            shape = list(z.shape)
            shape[-1] //= k  # integer division: the reshape below needs ints
            shape += [k]

            z = z.reshape(shape)
            y = theano.tensor.max(z, len(shape) - 1)

            return y

        self.name = scope
        self.config = config
        self.forward = forward
        self.parameter = transform.parameter
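The forward pass above folds the k affine pieces into a trailing axis and takes an elementwise max over it; a minimal NumPy illustration of the same reshape-and-max (shapes are illustrative):

import numpy as np

k = 2
z = np.random.randn(5, 8)                 # linear output, output_size * k = 8
y = z.reshape(5, 8 // k, k).max(axis=-1)  # maxout: max over the k pieces
assert y.shape == (5, 4)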
Example #11
def multiply(setup, messages, print_statements=True):
    # get all data and call pre_mult to compute the triple of shares
    computed_triples, field_size, gamma_secret, person_ids = prepare_data(
        setup)
    if print_statements:
        print("Computed triple from PreMult:", computed_triples)
    # create dictionaries for m1, m2, delta, epsilon to store their share values
    share_values = create_dictionaries(computed_triples, field_size, messages,
                                       person_ids)
    if print_statements:
        print("Shares for m_1, m_2:", get_shares('m_1', share_values),
              get_shares('m_2', share_values))
    # reconstruct delta and epsilon from their shares
    delta, _, _, _, _ = reconstruct(setup,
                                    random_subset=False,
                                    subset=get_shares('delta', share_values),
                                    print_statements=False)
    epsilon, _, _, _, _ = reconstruct(setup,
                                      random_subset=False,
                                      subset=get_shares(
                                          'epsilon', share_values),
                                      print_statements=False)
    if print_statements:
        print("Delta:", delta, "Epsilon:", epsilon)
    message_shares = {}
    # each shareholder computes the share of their message m with the given formula
    for i, shareholder in enumerate(person_ids):
        message_shares["s_{}_{}".format(shareholder[0], shareholder[1])] =\
            linear([(1, computed_triples[i][2]), (epsilon, messages[shareholder][0]), (delta, messages[shareholder][1]), (-delta, epsilon)], field_size)
    if print_statements:
        print("Message shares m:", message_shares)
    m, polynomial, _, _, _ = reconstruct(setup,
                                         random_subset=False,
                                         subset=message_shares,
                                         print_statements=False)
    m1, _, _, _, _ = reconstruct(setup,
                                 random_subset=False,
                                 subset=get_shares('m_1', share_values),
                                 print_statements=False)
    m2, _, _, _, _ = reconstruct(setup,
                                 random_subset=False,
                                 subset=get_shares('m_2', share_values),
                                 print_statements=False)
    if not (gamma_secret + ((epsilon * m1) % field_size) +
            ((delta * m2) % field_size) -
            ((epsilon * delta) % field_size)) % field_size == (
                m1 * m2) % field_size:
        print("multiply doesn't reconstruct correctly: ", gamma_secret, "+",
              epsilon, "*", m1, "+", delta, "*", m2, "-", epsilon, "*", delta,
              "=",
              (gamma_secret + ((epsilon * m1) % field_size) +
               ((delta * m2) % field_size) -
               ((epsilon * delta) % field_size) % field_size) % field_size,
              "!=", m1, "*", m2)
        raise ArithmeticError
    elif print_statements:
        print("Shares of m computed correctly")
    return message_shares
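The final consistency check rests on the Beaver-triple identity m1 * m2 = gamma + epsilon * m1 + delta * m2 - epsilon * delta (mod field_size), with delta = m1 - a, epsilon = m2 - b, and gamma = a * b. A tiny plain-integer check of that identity; all values below are illustrative:

field_size = 101
a, b = 17, 42                          # triple components a, b
gamma = (a * b) % field_size           # gamma = a * b
m1, m2 = 23, 57
delta = (m1 - a) % field_size
epsilon = (m2 - b) % field_size
lhs = (gamma + epsilon * m1 + delta * m2 - epsilon * delta) % field_size
assert lhs == (m1 * m2) % field_size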
Example #12
def test_00setup():
    global ret_result

    # connect and drop the current database
    with Connection():
        util_drop_all()

    # create the test database
    ret_result = linear()
Example #13
    def deep_step_fn(i_for_H_t, i_for_T_t, y_tm1, noise_s):
        tanh, sigm = T.tanh, T.nnet.sigmoid
        noise_s_for_H = noise_s if tied_noise else noise_s[0]
        noise_s_for_T = noise_s if tied_noise else noise_s[1]

        s_lm1 = y_tm1
        for l in range(depth):
            s_lm1_for_H = ifelse(_is_training, s_lm1 * noise_s_for_H, s_lm1)
            s_lm1_for_T = ifelse(_is_training, s_lm1 * noise_s_for_T, s_lm1)
            if l == 0:
                # On the first micro-timestep of each timestep we already have bias
                # terms summed into i_for_H_t and into i_for_T_t.
                H = tanh(i_for_H_t + linear(s_lm1_for_H,
                                            params,
                                            in_size=hidden_size,
                                            out_size=hidden_size,
                                            init_scale=init_scale))
                Tr = sigm(i_for_T_t + linear(s_lm1_for_T,
                                             params,
                                             in_size=hidden_size,
                                             out_size=hidden_size,
                                             init_scale=init_scale))
            else:
                H = tanh(
                    linear(s_lm1_for_H,
                           params,
                           in_size=hidden_size,
                           out_size=hidden_size,
                           init_scale=init_scale,
                           bias_init=init_H_bias))
                Tr = sigm(
                    linear(s_lm1_for_T,
                           params,
                           in_size=hidden_size,
                           out_size=hidden_size,
                           init_scale=init_scale,
                           bias_init=init_T_bias))
            s_l = (H - s_lm1) * Tr + s_lm1
            s_lm1 = s_l

        y_t = s_l
        return y_t
Example #14
  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(1, 4, concat)

      new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
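As with the GRU above, the cell body reduces to a few array operations; a minimal NumPy sketch of one BasicLSTMCell step, where W, b and the shapes are illustrative assumptions, not the TF variables:

import numpy as np

def lstm_step(x, c, h, W, b, forget_bias=1.0):
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    # One fused affine map, then split into the four gates.
    i, j, f, o = np.split(np.concatenate([x, h]) @ W + b, 4)
    new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    new_h = np.tanh(new_c) * sigmoid(o)
    return new_c, new_h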
Example #15
    def __init__(self, input_size, output_size, config=feedforward_config()):
        scope = config.scope
        activation = config.activation

        transform = linear(input_size, output_size, config)

        def forward(x):
            y = transform(x)
            return activation(y)

        self.name = scope
        self.config = config
        self.forward = forward
        self.parameter = transform.parameter
Example #16
def rand_shares_summation(computed_shares, field_size, r):
    all_shares = [None] * r
    # sum all alpha shares of one ID, pairing each with coefficient 1
    # (the multiplicative identity) so linear can be called on the pairs
    for j in range(r):
        tmp_shares = []
        for i in range(r):
            tmp_shares.append((1, int(computed_shares[i][j])))
        all_shares[j] = tmp_shares
    summed_shares = []
    # call linear to compute the sum (step 4 in rand_shares)
    # iterate over all alphas of one share to be able to recursively call linear
    for i in range(r):
        summed_shares.append(linear(all_shares[i], field_size))
    return summed_shares
Example #17
 def attention(query):
   """Put attention masks on hidden using hidden_features and query."""
   ds = []  # Results of attention reads will be stored here.
   for a in xrange(num_heads):
     with tf.variable_scope("Attention_%d" % a):
       y = linear.linear(query, attention_vec_size, True)
       y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
       # Attention mask is a softmax of v^T * tanh(...).
       s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
       a = tf.nn.softmax(s)
       # Now calculate the attention-weighted vector d.
       d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                         [1, 2])
       ds.append(tf.reshape(d, [-1, attn_size]))
   return ds
Example #18
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope
                               or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = tf.split(1, 2, state)
            concat = linear.linear([inputs, h], 4 * self._num_units, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(1, 4, concat)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(
                i) * tf.tanh(j)
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

        return new_h, tf.concat(1, [new_c, new_h])
Example #19
def _all_answers():
    """
    The leading underscore on this fixture suppresses an
    invalid-redefinition warning from pytest-pylint.

    """

    current_directory = os.path.dirname(__file__)
    parent_directory = os.path.split(current_directory)[0]
    print(current_directory)
    print(parent_directory)

    answers_linear = l.linear(parent_directory, "/data/product.csv",
                              "/data/customer.csv", "/data/rental.csv")
    answers_parallel = p.parallel(parent_directory, "/data/product.csv",
                                  "/data/customer.csv", "/data/rental.csv")

    return ({
        "processed": answers_linear[0][0],
        "count_prior": answers_linear[0][1],
        "count_new": answers_linear[0][2],
        "elapsed": answers_linear[0][3]
    }, {
        "processed": answers_linear[1][0],
        "count_prior": answers_linear[1][1],
        "count_new": answers_linear[1][2],
        "elapsed": answers_linear[1][3]
    }, {
        "processed": answers_parallel[0][0],
        "count_prior": answers_parallel[0][1],
        "count_new": answers_parallel[0][2],
        "elapsed": answers_parallel[0][3]
    }, {
        "processed": answers_parallel[1][0],
        "count_prior": answers_parallel[1][1],
        "count_new": answers_parallel[1][2],
        "elapsed": answers_parallel[1][3]
    })
Example #20
plt.show()

absorption = []
ne = ne[:, ::10]
nh = nh[:, ::10]
t = t[::10]

for j, item in enumerate(t):
    print("Probe pulse time {}".format(item))
    ans = linear(ti=0,
                 tf=20E-12,
                 N=400,
                 bg=bg_GaAs,
                 meff_el=0.067,
                 meff_holes=0.48,
                 dip_moment=trans_dipole_mom,
                 dephase_E=0.5E-3,
                 Amp=np.sqrt(1e3) * 10000,
                 t0=5E-12,
                 pulse_width=1000E-16,
                 dim=3,
                 ne=ne[:, j],
                 nh=nh[:, j])

    absorption.append(ans)

absorption = np.array(absorption)
plt.contourf(omegas*1e3, t/1e-12, absorption/1e6, 30)
plt.ylabel("Time (ps)")
plt.xlabel("Energy E-Eg (meV)")
bar = plt.colorbar()
bar.set_label(r"Absorption $x10^6$ (m$^-1$)")
Example #21
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
                      output_size=None, num_heads=1, loop_function=None,
                      dtype=tf.float32, scope=None):
  """RNN decoder with attention for the sequence-to-sequence model.

  Args:
    decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
    initial_state: 2D Tensor [batch_size x cell.state_size].
    attention_states: 3D Tensor [batch_size x attn_length x attn_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    output_size: size of the output vectors; if None, we use cell.output_size.
    num_heads: number of attention heads that read from attention_states.
    loop_function: if not None, this function will be applied to i-th output
      in order to generate i+1-th input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for decoding,
      but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x cell.output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x cell.input_size].
    dtype: The dtype to use for the RNN initial state (default: tf.float32).
    scope: VariableScope for the created subgraph; default: "attention_decoder".

  Returns:
    outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
      [batch_size x output_size]. These represent the generated outputs.
      Output i is computed from input i (which is either i-th decoder_inputs or
      loop_function(output {i-1}, i)) as follows. First, we run the cell
      on a combination of the input and previous attention masks:
        cell_output, new_state = cell(linear(input, prev_attn), prev_state).
      Then, we calculate new attention masks:
        new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
      and then we calculate the output:
        output = linear(cell_output, new_attn).
    states: The state of each decoder cell in each time-step. This is a list
      with length len(decoder_inputs) -- one item for each time-step.
      Each item is a 2D Tensor of shape [batch_size x cell.state_size].

  Raises:
    ValueError: when num_heads is not positive, there are no inputs, or shapes
      of attention_states are not set.
  """
  if not decoder_inputs:
    raise ValueError("Must provide at least 1 input to attention decoder.")
  if num_heads < 1:
    raise ValueError("With less than 1 heads, use a non-attention decoder.")
  if not attention_states.get_shape()[1:2].is_fully_defined():
    raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                     % attention_states.get_shape())
  if output_size is None:
    output_size = cell.output_size

  with tf.variable_scope(scope or "attention_decoder"):
    batch_size = tf.shape(decoder_inputs[0])[0]  # Needed for reshaping.
    attn_length = attention_states.get_shape()[1].value
    attn_size = attention_states.get_shape()[2].value

    # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
    hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
    hidden_features = []
    v = []
    attention_vec_size = attn_size  # Size of query vectors for attention.
    for a in xrange(num_heads):
      k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
      hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
      v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))

    states = [initial_state]

    def attention(query):
      """Put attention masks on hidden using hidden_features and query."""
      ds = []  # Results of attention reads will be stored here.
      for a in xrange(num_heads):
        with tf.variable_scope("Attention_%d" % a):
          y = linear.linear(query, attention_vec_size, True)
          y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
          # Attention mask is a softmax of v^T * tanh(...).
          s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
          a = tf.nn.softmax(s)
          # Now calculate the attention-weighted vector d.
          d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
                            [1, 2])
          ds.append(tf.reshape(d, [-1, attn_size]))
      return ds

    outputs = []
    prev = None
    batch_attn_size = tf.pack([batch_size, attn_size])
    attns = [tf.zeros(batch_attn_size, dtype=dtype)
             for _ in xrange(num_heads)]
    for a in attns:  # Ensure the second shape of attention vectors is set.
      a.set_shape([None, attn_size])
    for i in xrange(len(decoder_inputs)):
      if i > 0:
        tf.get_variable_scope().reuse_variables()
      inp = decoder_inputs[i]
      # If loop_function is set, we use it instead of decoder_inputs.
      if loop_function is not None and prev is not None:
        with tf.variable_scope("loop_function", reuse=True):
          inp = tf.stop_gradient(loop_function(prev, i))
      # Merge input and previous attentions into one vector of the right size.
      x = linear.linear([inp] + attns, cell.input_size, True)
      # Run the RNN.
      cell_output, new_state = cell(x, states[-1])
      states.append(new_state)
      # Run the attention mechanism.
      attns = attention(new_state)
      with tf.variable_scope("AttnOutputProjection"):
        output = linear.linear([cell_output] + attns, output_size, True)
      if loop_function is not None:
        # We do not propagate gradients over the loop function.
        prev = tf.stop_gradient(output)
      outputs.append(output)

  return outputs, states
Example #22
 def __call__(self, inputs, state, scope=None):
     """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
     with tf.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
         output = tf.tanh(
             linear.linear([inputs, state], self._num_units, True))
     return output, output
Example #23
 def __call__(self, inputs, state, scope=None):
     """Run the input projection and then the cell."""
     # Default scope: "InputProjectionWrapper"
     with tf.variable_scope(scope or type(self).__name__):
         projected = linear.linear(inputs, self._cell.input_size, True)
     return self._cell(projected, state)
Example #24
def upload():
    dest = None
    newfile = None
    target = os.path.join(APP_ROOT, 'static/input')
    enhancement = request.form['chooseenhancement']
    upload = request.files.get("file")
    filename = upload.filename
    newfilename = filename.split(".")[0]
    destination = "/".join([target, filename])
    upload.save(destination)
    dest = 'static/input/' + filename
    ran = random.randint(100, 10000)
    if (enhancement == "linear"):
        l.linear(dest, 1, ran)
        inphist = 'static/output/' + newfilename + str(
            ran) + "_linear_inp_hist.jpg"
        outimg = 'static/output/' + newfilename + str(
            ran) + "_linear_out_img.jpg"
        outhist = 'static/output/' + newfilename + str(
            ran) + "_linear_out_hist.jpg"
        enhancename = "Linear"
    elif (enhancement == "stdev"):
        k = request.form['k']
        s.stdev(dest, 1, ran, k)
        inphist = 'static/output/' + newfilename + str(
            ran) + "_stdev_inp_hist.jpg"
        outimg = 'static/output/' + newfilename + str(
            ran) + "_stdev_out_img.jpg"
        outhist = 'static/output/' + newfilename + str(
            ran) + "_stdev_out_hist.jpg"
        enhancename = "Standard Deviation"
    elif (enhancement == "histeq"):
        h.histeq(dest, 1, ran)
        inphist = 'static/output/' + newfilename + str(
            ran) + "_histeq_inp_hist.jpg"
        outimg = 'static/output/' + newfilename + str(
            ran) + "_histeq_out_img.jpg"
        outhist = 'static/output/' + newfilename + str(
            ran) + "_histeq_out_hist.jpg"
        enhancename = "Histogram Equalization"
    elif (enhancement == "log"):
        log.log(dest, 1, ran)
        inphist = 'static/output/' + newfilename + str(
            ran) + "_log_inp_hist.jpg"
        outimg = 'static/output/' + newfilename + str(ran) + "_log_out_img.jpg"
        outhist = 'static/output/' + newfilename + str(
            ran) + "_log_out_hist.jpg"
        enhancename = "Logarithmic"
    else:
        g = request.form['g']
        e.exp(dest, 1, ran, g)
        inphist = 'static/output/' + newfilename + str(
            ran) + "_exp_inp_hist.jpg"
        outimg = 'static/output/' + newfilename + str(
            ran) + "_exp_out_img.jpg"
        outhist = 'static/output/' + newfilename + str(
            ran) + "_exp_out_hist.jpg"
        enhancename = "Exponential"
    return render_template("enhance.html",
                           inpimg=dest,
                           outimg=outimg,
                           inphist=inphist,
                           outhist=outhist,
                           enhancement=enhancename)
Example #25
from luenberger import luenberger


# Define global parameters
M_p     = 0.86   # Mass of the pulley
Y       = 2e6    # Young's modulus of the belt
A       = 14e-6  # Cross-sectional area of the belt
l       = 0.41   # Distance between pulleys 2 and 3
r       = 0.05   # Radius of the pulleys
k_f     = 1.0    # Friction constant
L_k     = 0.3    # See the model diagram
rho_o   = 714    # Density of the belt at rest
rho_e   = 714    # Density of the belt outside the system

# Create a linear object, and define sizes
sys = linear()

x = sys.state(3)
u = sys.input(2)

# Define global functions that can be written explicitly, i.e.
# y = f(x_1, x_2, ..., x_n) for all n in N, with x_i != y
def L(beta):
    return L_k + 4 * r * (beta - tan(beta)) + l / cos(beta)

def T(rho):
    return Y * A * (rho_o / rho - 1)

def rho(L, M):
    return M / (A * L)
Example #26
quotes_high = [i for i in quotes_high if i]
'''
checked_quotes = []
for i in range(len(quotes_high)):

    if not(i):
        print(i)
        print("next " + str( checked_quotes[i+1]))
        checked_quotes.append(quotes_high[i-1])
    else:
        checked_quotes.append(quotes_high[i])
quotes_high = checked_quotes
'''
#print(quotes_high)
a = []

for i in range(len(quotes_high)):
    a.append(i)

#last val is current date
#print(quotes_high[-1])
#first val is all the way back
#print(quotes_high[0])
m, b = linear(a, quotes_high, plot=True)
if m > 0:
    #in upward trend
    if m > 0.15 and m < 0.4:
        print("medium (improving) upward trend")
    elif m >= 0.4:
        print("extremley high trend")
Example #27
    def __init__(self, input_size, output_size, config=gru_config()):
        scope = config.scope
        concat = config.concat
        activation = config.activation
        complement = config.complement

        if not isinstance(input_size, (list, tuple)):
            input_size = [input_size]

        modules = []

        # config scope
        with variable_scope(scope):
            if not concat:
                isize = input_size + [output_size]
                osize = output_size
                rgate = feedforward(isize, osize, config.reset_gate)
                ugate = feedforward(isize, osize, config.update_gate)
                trans = linear(isize, osize, config.candidate)

                modules.append(rgate)
                modules.append(ugate)
                modules.append(trans)
            else:
                isize = input_size + [output_size]
                osize = output_size
                gates = feedforward(isize, 2 * osize, config.gates)
                trans = linear(isize, osize, config.candidate)

                modules.append(gates)
                modules.append(trans)

        params = []

        for m in modules:
            params.extend(m.parameter)

        def forward(x, h):
            if not isinstance(x, (list, tuple)):
                x = [x]

            if not concat:
                reset_gate = modules[0]
                update_gate = modules[1]
                transform = modules[2]
                r = reset_gate(x + [h])
                u = update_gate(x + [h])
                c = activation(transform(x + [r * h]))
            else:
                gates = modules[0]
                transform = modules[1]
                r_u = gates(x + [h])
                size = r_u.shape[-1] // 2
                r, u = theano.tensor.split(r_u, (size, size), 2, -1)
                c = activation(transform(x + [r * h]))

            if complement:
                y = u * h + (1.0 - u) * c
            else:
                y = (1.0 - u) * h + u * c

            return y, y

        self.name = scope
        self.config = config
        self.forward = forward
        self.parameter = params
Example #28
def compute_delta_epsilon(m_1, m_2, computed_shares, field_size):
    share_delta = linear([(1, m_1), (-1, computed_shares[0])], field_size)
    share_epsilon = linear([(1, m_2), (-1, computed_shares[1])], field_size)
    # print(share_delta, share_epsilon, "<--")
    return share_delta, share_epsilon
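Consistent with its call sites in these secret-sharing examples, linear appears to compute a weighted sum of shares reduced modulo the field size; a minimal sketch under that assumption:

def linear(weighted_shares, field_size):
    # Sum coefficient * share over all (coefficient, share) pairs, mod field_size.
    return sum(c * s for c, s in weighted_shares) % field_size

# e.g. compute_delta_epsilon above yields m_1 - computed_shares[0] (mod field_size).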
Example #29
 def __call__(self, inputs, state, scope=None):
   """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
   with tf.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
     output = tf.tanh(linear.linear([inputs, state], self._num_units, True))
   return output, output
Example #30
def model(inputs, _is_training, params, depth, batch_size, hidden_size, drop_i,
          drop_s, init_scale, init_T_bias, init_H_bias, tied_noise,
          _theano_rng):
    noise_i_for_H = get_dropout_noise((batch_size, hidden_size), drop_i,
                                      _theano_rng)
    noise_i_for_T = get_dropout_noise(
        (batch_size, hidden_size), drop_i,
        _theano_rng) if not tied_noise else noise_i_for_H

    i_for_H = ifelse(_is_training, noise_i_for_H * inputs, inputs)
    i_for_T = ifelse(_is_training, noise_i_for_T * inputs, inputs)

    i_for_H = linear(i_for_H,
                     params,
                     in_size=hidden_size,
                     out_size=hidden_size,
                     init_scale=init_scale,
                     bias_init=init_H_bias)
    i_for_T = linear(i_for_T,
                     params,
                     in_size=hidden_size,
                     out_size=hidden_size,
                     init_scale=init_scale,
                     bias_init=init_T_bias)

    # Dropout noise for recurrent hidden state.
    noise_s = get_dropout_noise((batch_size, hidden_size), drop_s, _theano_rng)
    if not tied_noise:
        noise_s = T.stack(
            noise_s,
            get_dropout_noise((batch_size, hidden_size), drop_s, _theano_rng))

    def deep_step_fn(i_for_H_t, i_for_T_t, y_tm1, noise_s):
        tanh, sigm = T.tanh, T.nnet.sigmoid
        noise_s_for_H = noise_s if tied_noise else noise_s[0]
        noise_s_for_T = noise_s if tied_noise else noise_s[1]

        s_lm1 = y_tm1
        for l in range(depth):
            s_lm1_for_H = ifelse(_is_training, s_lm1 * noise_s_for_H, s_lm1)
            s_lm1_for_T = ifelse(_is_training, s_lm1 * noise_s_for_T, s_lm1)
            if l == 0:
                # On the first micro-timestep of each timestep we already have bias
                # terms summed into i_for_H_t and into i_for_T_t.
                H = tanh(i_for_H_t + linear(s_lm1_for_H,
                                            params,
                                            in_size=hidden_size,
                                            out_size=hidden_size,
                                            init_scale=init_scale))
                Tr = sigm(i_for_T_t + linear(s_lm1_for_T,
                                             params,
                                             in_size=hidden_size,
                                             out_size=hidden_size,
                                             init_scale=init_scale))
            else:
                H = tanh(
                    linear(s_lm1_for_H,
                           params,
                           in_size=hidden_size,
                           out_size=hidden_size,
                           init_scale=init_scale,
                           bias_init=init_H_bias))
                Tr = sigm(
                    linear(s_lm1_for_T,
                           params,
                           in_size=hidden_size,
                           out_size=hidden_size,
                           init_scale=init_scale,
                           bias_init=init_T_bias))
            s_l = (H - s_lm1) * Tr + s_lm1
            s_lm1 = s_l

        y_t = s_l
        return y_t

    y_0 = shared_zeros((batch_size, hidden_size))

    y, _ = theano.scan(deep_step_fn,
                       sequences=[i_for_H, i_for_T],
                       outputs_info=[y_0],
                       non_sequences=[noise_s])

    y_last = y[-1]
    sticky_state_updates = [(y_0, y_last)]

    return y, y_0, sticky_state_updates
Example #31
 def __call__(self, inputs, state, scope=None):
   """Run the input projection and then the cell."""
   # Default scope: "InputProjectionWrapper"
   with tf.variable_scope(scope or type(self).__name__):
     projected = linear.linear(inputs, self._cell.input_size, True)
   return self._cell(projected, state)
Example #32
    def oneDB(self):

        self.barcode = linear.linear(self.FName)
        print(self.barcode)
        self.resultBox.setText(self.barcode)
Example #33
border = {"width": 620, "height": 620}

# draw square and coordinate
print("< Jeffrey's Plotting Software >")
print("drawing rectangular coordinates...")
square.draw_square(border["width"], border["height"], -310, -300)
penup()
setx(0)
sety(0)
pendown()
coordinates.draw_coordinate()

draw_finished = False
while not draw_finished:
    function_type = int(input("linear : 1, quadratic : 2 "))
    if (function_type == 1):
        slope = int(input("type the slope : "))
        y_incpt = int(input("type the y intercept : "))
        linear.linear(slope, y_incpt)
    if (function_type == 2):
        a = float(input("type a : "))
        b = int(input("type b : "))
        c = int(input("type c : "))
        quadratic.quadratic(a, b, c)
    draw_ends = input("press N to finish drawing. ")
    if (draw_ends == "N"):
        draw_finished = True

done()