def solve_contractive_sign(known_T, weight, bias, LAYER):
    """Recover the sign of each neuron in a candidate layer of a contractive
    network.

    For each hidden unit of the layer described by (weight, bias), this pushes
    a hidden activation through the inverse of the partially-extracted network
    (``known_T`` plus the candidate layer), queries the real model via ``run``,
    and decides whether the neuron's sign must be flipped.

    Args:
        known_T: partially-extracted transform with lists ``A`` (weights) and
            ``B`` (biases) for the already-recovered layers, plus ``extend_by``
            and ``forward`` methods.
        weight: weight matrix of the layer currently being extracted.
        bias: bias vector of that layer.
        LAYER: index of the layer being extracted; used to look up the hidden
            width in the global ``sizes``.

    Returns:
        A list of +1/-1 sign guesses, one per neuron of the candidate layer.

    Raises:
        AcceptableFailure: if the computed preimage does not actually map to
            (approximately) zero under the extended network.
    """

    print("Solve the extraction problem for contractive networks")

    def get_preimage(hidden):
        # Invert the network layer by layer: starting from the target hidden
        # vector, solve a least-squares problem per layer to find an input
        # that produces it.
        preimage = hidden

        for i, (my_A, my_B) in reversed(
                list(enumerate(zip(known_T.A + [weight],
                                   known_T.B + [bias])))):
            if i == 0:
                # Input layer: the input is unconstrained.
                res = scipy.optimize.lsq_linear(my_A.T,
                                                preimage - my_B,
                                                bounds=(-np.inf, np.inf))
            else:
                # Post-ReLU activations must be non-negative.
                res = scipy.optimize.lsq_linear(my_A.T,
                                                preimage - my_B,
                                                bounds=(0, np.inf))

            preimage = res.x
        return preimage[np.newaxis, :]

    # Target the all-zero hidden vector of the candidate layer.
    hidden = np.zeros((sizes[LAYER + 1]))

    preimage = get_preimage(hidden)

    extended_T = known_T.extend_by(weight, bias)

    # Oracle output at the zero-activation preimage; the reference point.
    standard_out = run(preimage)

    signs = []

    for axis in range(len(hidden)):
        # Perturb exactly one hidden coordinate in each direction.
        h = np.array(hidden)
        h[axis] = 10
        preimage_plus = get_preimage(h)
        h[axis] = -10
        preimage_minus = get_preimage(h)

        print("Confirm preimage")

        # Sanity check that the inversion really maps to ~zero activations.
        # NOTE(review): this always tests `preimage` (loop-invariant), not
        # preimage_plus / preimage_minus — possibly intentional, but verify.
        if np.any(extended_T.forward(preimage) > 1e-5):
            raise AcceptableFailure()

        out_plus = run(preimage_plus)
        out_minus = run(preimage_minus)

        print(standard_out, out_plus, out_minus)

        # If the +10 perturbation leaves the output unchanged, the neuron is
        # clipped by the ReLU on that side, i.e. the sign is inverted.
        inverted_if_small = np.sum(np.abs(out_plus - standard_out))
        not_inverted_if_small = np.sum(np.abs(out_minus - standard_out))

        print("One of these should be small", inverted_if_small,
              not_inverted_if_small)

        if inverted_if_small < not_inverted_if_small:
            signs.append(-1)
        else:
            signs.append(1)
    return signs
# Example #2
def run_spiders():
    """Launch every configured spider, each with two worker threads."""
    spider_configs = (
        (AvitoSpider, {'thread_number': 2}),
        (IRRSpider, {'thread_number': 2}),
    )
    run(spider_configs, debug=True)
def get_grad(x, direction, eps=1e-6):
    """
    Estimate the directional gradient of ``run`` at ``x`` via a one-sided
    finite difference along ``direction``.

    Uses just two query points, which is sufficient for most of the code.

    Can fail if x sits exactly at a critical point, since the two queries
    then straddle the kink and sample different linear regions:
           /
          X
         /
    -X--/

    """
    batched = x[np.newaxis, :]
    left = run(batched - eps * direction)
    right = run(batched)
    return (right - left) / eps
def solve_final_layer(known_T, inputs, outputs):
    """Solve for the final (linear) layer by least squares, persist the
    extracted model, and report the maximum logit error vs. the true model.

    Args:
        known_T: extracted transform for all hidden layers (lists ``A``/``B``
            plus a ``forward`` method).
        inputs: batch of input points used to fit the final layer.
        outputs: unused — immediately overwritten by a fresh oracle query
            below; kept in the signature for caller compatibility.

    Side effects:
        Writes the extracted and true models to ``./models/`` via pickle and
        appends the maximum observed loss to ``results.txt``.
    """
    if CHEATING:
        for i, (normal, bias) in enumerate(zip(known_T.A, known_T.B)):
            logger.log('', level=Logger.INFO)
            logger.log("LAYER", i, level=Logger.INFO)
            check_quality(i, normal, bias)

    # NOTE(review): the `outputs` argument is discarded here; a fresh oracle
    # query is used instead.
    outputs = run(inputs)
    hidden = known_T.forward(inputs, with_relu=True)

    # Append a constant-1 column so the bias is solved jointly with weights.
    hidden = np.concatenate([hidden, np.ones((hidden.shape[0], 1))], axis=1)

    # rcond=None selects NumPy's current default cutoff and silences the
    # FutureWarning raised when rcond is left unspecified.
    solution = np.linalg.lstsq(hidden, outputs, rcond=None)

    vector = solution[0]

    # Split the solved augmented matrix back into weights and bias.
    At = known_T.A + [vector[:-1]]
    Bt = known_T.B + [vector[-1]]

    extracted_path = "./models/extracted-%s.p" % "-".join(map(str, sizes))
    logger.log("SAVING", extracted_path, level=Logger.INFO)

    # Context managers: the original passed bare open() handles to
    # pickle.dump and leaked them.
    with open(extracted_path, "wb") as f:
        pickle.dump([At, Bt], f)

    from src.global_vars import __cheat_A, __cheat_B
    with open("./models/real-%s.p" % "-".join(map(str, sizes)), "wb") as f:
        pickle.dump([__cheat_A, __cheat_B], f)

    def loss(x):
        # Extracted-model logits minus true-model logits.
        return (run(x, inner_A=At, inner_B=Bt) -
                run(x, inner_A=__cheat_A, inner_B=__cheat_B))

    ls = []
    for _ in range(1):
        print(_)
        inp = np.random.normal(0, 1, (100000, A[0].shape[0]))
        ls.extend(loss(inp).flatten())

    logger.log("\n\n", level=Logger.INFO)

    logger.log("Finally we are done.\n", level=Logger.INFO)

    max_loss = np.max(np.abs(ls))

    # Append-mode write, closed deterministically even on error.
    with open("results.txt", "a") as res:
        res.write(str(max_loss))

    logger.log('Maximum logit loss on the unit sphere',
               max_loss,
               level=Logger.INFO)
    logger.log("\nfin", level=Logger.INFO)
def get_second_grad_unsigned(x, direction, eps, eps2):
    """
    Unsigned second-derivative probe along ``direction``, batched into a
    single oracle query for efficiency.
    """
    # Four probe points, symmetric about x along the given direction.
    offsets = (eps - eps2, eps, -(eps - eps2), -eps)
    queries = np.array([x + direction * step for step in offsets])

    out = run(queries)

    # Combine the four outputs with the fixed finite-difference mask.
    return np.dot(out.flatten(), MASK) / eps
 # NOTE(review): stray duplicate of the `loss` closure defined inside
 # solve_final_layer; it relies on At/Bt/__cheat_A/__cheat_B from an
 # enclosing scope that is not visible in this chunk — verify before use.
 def loss(x):
     # Extracted-model logits minus true-model logits for batch x.
     return (run(x, inner_A=At, inner_B=Bt) -
             run(x, inner_A=__cheat_A, inner_B=__cheat_B))
# Example #7
    make_data_and_env(args)
  agent, evaluator = make_follower_models(args, len(vocab), all_val_data, env)
  if args.reward and not args.no_speaker:
    speaker = make_speaker_models(args, len(vocab), env, tok)
    speaker.load(args.speaker_prefix, **{})
    print("Load speaker model %s" % args.speaker_prefix)
  else:
    speaker = None
  if args.follower_prefix is not None:
    agent.load(args.follower_prefix, args.load_opt, **{})
    print("Load follower model %s" % args.follower_prefix)
  return agent, train_data, val_data, evaluator, speaker, train_tag


def train_val(args):
  """Train on the training set and validate on the seen/unseen splits."""
  setup = train_setup(args)
  agent, train_data, val_data, evaluator, speaker, train_tag = setup
  train(args, agent, train_data, val_data, evaluator, speaker, train_tag)


def make_arg_parser():
  """Build the CLI parser with image-feature and general options attached."""
  arg_parser = argparse.ArgumentParser()
  ImageFeatures.add_args(arg_parser)
  add_general_args(arg_parser)
  return arg_parser


# Script entry point: build the argument parser and launch train/validate.
if __name__ == "__main__":
  run(make_arg_parser(), train_val)
# Example #8
 def memo_forward_pass(x):
     # Memoized oracle query along the line offset + x * direction;
     # results are cached in the enclosing dict `c` keyed by x.
     if x in c:
         return c[x]
     c[x] = run((offset + direction * x)[np.newaxis, :])
     return c[x]