Example #1
 def advance(self):
     # Keep moving toward the buoy until it fills the camera frame, at which
     # point we have definitely hit it or are just about to. The exact
     # full-frame width is unknown, so 500 px is used as the threshold.
     if self.lastKnownBuoyLoc.width < 500:
         forward(0.3)
     else:
         forward(0.0)
Example #2
def main():
    args = docopt(__doc__)

    use_duration = bool(args["-D"])
    window = int(args["-w"])

    models = model.extract_models(model.ZIPPED_MODELS)

    if args["-B"]:
        models = classification.binarize(models)

    for line in sys.stdin:
        splitted = line.strip().split("\t")

        if len(splitted) == 5:
            mid_terminal, uid, bid, aid, histories = splitted
            term = classification.terminal_name(mid_terminal)
            paths = stream.decode(histories)
            assert len(paths) == 1

            decision = classification.query(term, paths[0], models, use_duration=use_duration, window=window)

            if decision:
                forward(
                    mid_terminal,
                    uid,
                    bid,
                    aid,
                    decision.decision,
                    decision.prob,
                    decision.path,
                    decision.probs,
                    decision.principal,
                )
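
Note: in Examples #2 and #13 the records arrive on sys.stdin as tab-separated fields, which suggests a Hadoop-streaming-style pipeline. The forward helper itself is not shown in the source; a minimal sketch consistent with that usage (an assumption, not the projects' actual code) could emit one tab-joined record per call:

import sys

# Hypothetical helper (not from the source): join the fields with tabs and
# write a single record to stdout, streaming-MapReduce style.
def forward(*fields):
    sys.stdout.write("\t".join(str(f) for f in fields) + "\n")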
Example #3
 def execute(self, userdata):
     forward(0.5)
     for i in range(1000):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(1)
     # Note: this state reports 'preempted' even when the loop runs to
     # completion without a preempt request (compare Example #5, which
     # returns 'succeeded' in that case).
     return 'preempted'
Example #4
def dump(key, model):
    split = lambda l, n: (l[x: x + n] for x in range(0, len(l), n))
    splitted = split(encode(model), MAX_SIZE)

    for i, code in enumerate(splitted):
        keys = key.split('-')

        if len(keys) > 1:
            forward('-'.join(keys[0:-1]), keys[-1], i, code)
        else:
            forward(key, 'NULL', i, code)
Example #5
 def execute(self, userdata):
     move('Depth', 'Command', 2.0)
     for i in range(4):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(1)
     forward(0.75)
     for i in range(self.GO_FORWARD_TIMEOUT):
         if self.preempt_requested():
             self.service_preempt()
             return 'preempted'
         rospy.sleep(1)
     return 'succeeded'
Example #6
File: svm.py Project: colinsongf/CN_QA
    def train_save(self):
        train_data, train_target, test_data, test_target = \
            self.get_train_test_data()
        print(len(train_data), len(train_target))
        print(len(test_data), len(test_target))
        # train_data = np.array(train_data, dtype = 'float')
        # test_data = np.array(test_data, dtype = 'float')
        self.cl.fit(train_data, train_target)

        predict = self.cl.predict(test_data)
        utils.forward(self)
        Loader.save_model(self.cl, self.model_path, "svm")

        print(metrics.accuracy_score(test_target, predict))
Example #7
def main():

    X, T = get_facialexpression(balance_ones=True)
    # X, T  = np.shuffle(X,T)
    label_map = [
        'Anger', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'
    ]
    # klass =3  error_rate=0.0
    # klass =4  error_rate=0.0
    # klass =5  error_rate=0.0
    # klass =0
    klass = 4
    N, D = X.shape
    X = np.concatenate(
        (np.ones((N, 1)), X),
        axis=1,
    )
    T = T.astype(np.int32)
    X = X.astype(np.float32)
    # Reduce the labels to a binary one-vs-rest target for the selected class.
    T = class1detect(T, detect=klass)

    D += 1

    # params
    lr = 5e-7
    max_iteration = 150
    W = np.random.randn(D) / np.sqrt(D)
    cost = []
    error = []
    for i in range(max_iteration):
        Y = forward(W, X)
        cost.append(cross_entropy(T, Y))
        error.append(error_rate(T, Y))

        W += lr * X.T.dot(T - Y)

        if i % 5 == 0:
            print("i=%d\tcost=%.3f\terror=%.3f" % (i, cost[-1], error[-1]))

    # Report the final state once training ends.
    print("i=%d\tcost=%.3f\terror=%.3f" % (i, cost[-1], error[-1]))

    print("Final weight:", W)
    print(T)
    print(np.round(Y))

    plt.title('logistic regression ' + label_map[klass])
    plt.xlabel('iterations')
    plt.ylabel('cross entropy')
    plt.plot(cost)
    plt.show()

    plt.title('logistic regression ' + label_map[klass])
    plt.xlabel('iterations')
    plt.ylabel('error rate')
    plt.plot(error)
    plt.show()
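
Note: the helpers forward, cross_entropy, and error_rate used above are not shown in the source. Given the update W += lr * X.T.dot(T - Y), this is plain binary logistic regression, so a minimal sketch of those helpers (an assumption, not the project's actual code) might be:

import numpy as np

def forward(W, X):
    # Sigmoid of the linear score; X already carries a bias column of ones.
    return 1 / (1 + np.exp(-X.dot(W)))

def cross_entropy(T, Y):
    # Mean binary cross-entropy between targets T and predictions Y.
    return -np.mean(T * np.log(Y) + (1 - T) * np.log(1 - Y))

def error_rate(T, Y):
    # Fraction of rounded predictions that disagree with the targets.
    return np.mean(np.round(Y) != T)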
Example #8
 def from_pretrained(cls, path, output_hidden_states=False):
     if os.path.exists(path):
         parameters = utils.read_yaml(path)
     else:
         raise ValueError("{} doesn't exist.".format(path))
     model = cls(**parameters)
     model.load_state_dict(torch.load(parameters['weights_path']))
     if output_hidden_states:
         model.rnn.forward = lambda input, hidden: utils.forward(model.rnn, input, hidden, model.param)
     return model
Example #9
 def execute(self, userdata):
     self.pub = rospy.Publisher('/Module_Enable', ModuleEnableMsg)
     self.sub = rospy.Subscriber('/Task_Completion', String, self.task_complete)
     self.objSub = rospy.Subscriber('img_rec/paths', ImgRecObject, self.objCallback)
     msg = ModuleEnableMsg()
     msg.Module = 'NewPathTask'
     msg.State = True
     self.pub.publish(msg)
     dive(4)
     while self.timeout > 0:
         if self.is_complete:
             self.disable_task()
             return 'succeeded'
         if self.preempt_requested():
             self.disable_task()
             self.service_preempt()
             return 'preempted'
         forward(0.35)
         rospy.sleep(1)
         self.timeout -= 1
     self.disable_task()
     return 'timeout'
Example #10
 def train_save(self):
     train_data, train_target, test_data, test_target = \
         self.get_train_test_data()
     print(len(train_data), len(train_target))
     print(len(test_data), len(test_target))
     train_data = np.array(train_data)
     test_data = np.array(test_data)
     print(len(train_data))
     best = -999999
     for i in range(10):
         self.model.fit(train_data,
                        train_target,
                        batch_size=20,
                        nb_epoch=1,
                        shuffle=True)
         score = self.model.evaluate(test_data, test_target, batch_size=20)
         print(score)
         if score[1] > best:
             print('forward!!')
             utils.forward(self)
             best = score[1]
             Loader.save_model(self.model, self.model_path, "lstm",
                               self.params_path)
Example #11
    def forward(self, x):
        '''
        Flatten each input sample into convolution patches and apply the
        layer weights via utils.forward, producing one output row per sample.
        '''
        N = x.shape[0]
        y = np.zeros((N, self.dim_out))

        self.fx = np.zeros((N, self.hout * self.wout, self.dim_k))

        for i in range(N):
            self.fx[i, ] = utils.flatten(x[i, :].reshape(self.shape_in),
                                         self.shape_in, self.shape_k, self.pad,
                                         self.stride, self.indice).reshape(
                                             self.hout * self.wout, -1)
            y[i, ] = utils.forward(self.fx[i, ], self.w, self.b).T.ravel()
        return y
Example #12
 def execute(self, userdata):
     self.reset()
     sub = rospy.Subscriber('img_rec/paths', ImgRecObject, self.found_path_cb)
     dive(self._target_depth)
     for i in range(self._dive_time):
         if self.preempt_requested():
             self.service_preempt()
             sub.unregister()
             return 'preempted'
         rospy.sleep(1)
     forward(0.3)
     for i in range(self._timeout):
         if self.found_path:
             sub.unregister()
             # Start slowing down by briefly reversing thrust, then schedule a
             # one-shot timer to stop completely.
             forward(-0.1)
             rospy.Timer(rospy.Duration(self._slow_down_duration), self.slow_down_stop, True)
             return 'found_path'
         rospy.sleep(1)
     sub.unregister()
     self.reset()
     return 'timed_out'
Example #13
def main():
    args = docopt(__doc__)

    for line in sys.stdin:
        term, histories = history.build(line)

        if term and histories:
            assert len(histories) == 1

            if args['train']:
                mergers = int(args['-m'])
                reducers = int(args['-r'])

                key = term
                n = int(ceil(pow(reducers, 1.0 / mergers)))
                for i in range(mergers):
                    key += '-%d' % randint(0, n)

                forward(key, stream.encode(histories))
            elif args['classify']:
                fields = history.extract_fields(line)

                forward(fields.mid_terminal, fields.uid, fields.bid, fields.aid,
                        stream.encode(histories))
Example #14
    def discriminator(self):
        disc = [
            ConvLayer(num_filters=64,
                      kernel_size=4,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      activation=leakyrelu),
            ConvLayer(num_filters=128,
                      kernel_size=4,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=slim.batch_norm,
                      activation=leakyrelu),
            ConvLayer(num_filters=256,
                      kernel_size=4,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=slim.batch_norm,
                      activation=leakyrelu),
            ConvLayer(num_filters=512,
                      kernel_size=4,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=slim.batch_norm,
                      activation=leakyrelu),
            ConvLayer(num_filters=1,
                      kernel_size=4,
                      stride=1,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=slim.batch_norm,
                      activation=leakyrelu)
        ]

        return forward(disc)
Example #15
def main():
    user_action = 3
    X, T = get_ecommerce(user_action=user_action)
    # X, T = np.shuffle(X, T)

    N, D = X.shape
    X = np.concatenate((np.ones((N, 1)), X), axis=1)
    T = T.astype(np.int32)
    X = X.astype(np.float32)
    D += 1

    # params
    lr = 5e-4
    max_iteration = 1000
    W = np.random.randn(D) / np.sqrt(D)
    cost = []
    error = []
    for i in range(max_iteration):
        Y = forward(W, X)
        cost.append(cross_entropy(T, Y))
        error.append(error_rate(T, Y))

        W += lr * X.T.dot(T - Y)

        if i % 5 == 0:
            print("i=%d\tcost=%.3f\terror=%.3f" % (i, cost[-1], error[-1]))

    # Report the final state once training ends.
    print("i=%d\tcost=%.3f\terror=%.3f" % (i, cost[-1], error[-1]))

    print("Final weight:", W)

    plt.title('logistic regression user_action=%d' % (user_action))
    plt.xlabel('iterations')
    plt.ylabel('cross entropy')
    plt.plot(cost)
    plt.show()

    plt.title('logistic regression user_action=%d' % (user_action))
    plt.xlabel('iterations')
    plt.ylabel('error rate')
    plt.plot(error)
    plt.show()
Example #16
 def __call__(self, x):
     layers = [
         PaddingLayer(self.padding_size, self.padding_type),
         ConvLayer(num_filters=self.num_filters,
                   kernel_size=self.kernel_size,
                   stride=self.stride,
                   padding="VALID",
                   weights_init=self.weights_init,
                   normalizer=self.normalizer,
                   activation=self.activation),
         PaddingLayer(self.padding_size, self.padding_type),
         ConvLayer(num_filters=self.num_filters,
                   kernel_size=self.kernel_size,
                   stride=self.stride,
                   padding="VALID",
                   weights_init=self.weights_init,
                   normalizer=self.normalizer,
                   activation=None)
     ]
     res = forward(layers)(x) + x
     return res
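
Note: in the TensorFlow examples (#14, #16, and #33), forward takes a list of layers and the result is applied to a tensor (forward(layers)(x)), so it apparently builds a composed callable. A minimal sketch under that assumption, not the projects' actual implementation:

# Hypothetical: compose a list of layer callables into one function that
# applies them left to right.
def forward(layers):
    def apply(x):
        for layer in layers:
            x = layer(x)
        return x
    return apply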
Example #17
 def forward(self, x):
     self.x = x
     return utils.forward(x, self.w, self.b)
Example #18
def main():
    dset = dataset(dconf)
    # dset.set_batch_size([25, 25])

    input_dim = dconf['input_dim']
    output_dim = dconf['output_dim']
    lr = 1e-1
    # a random labeled example
    xl = tf.placeholder(tf.float32, input_dim)
    # a random neighbor
    xn = tf.placeholder(tf.float32, input_dim)
    # a random unlabeled example
    xu = tf.placeholder(tf.float32, input_dim)

    yl = forward(dconf['netName_label'], xl)
    yn = forward(dconf['netName_neighbor'], xn)
    yu = forward(dconf['netName_unlabel'], xu)
    yt = tf.placeholder(tf.float32, output_dim)

    loss_ = supervised_loss('abs_quadratic')(yl, yt)
    if not dconf['supervise only']:
        # add loss based on manifold neighbor
        loss_ += graph_loss('LE')(yl, yn, 1.0)
        loss_ += graph_loss('LE')(yl, yu, 0.0)
        # add loss based on cluster
        loss_ += cluster_loss('S3VM')(yn)

    opt = tf.train.AdagradOptimizer(lr)
    gvs = opt.compute_gradients(loss_)
    clipped_gvs = [(tf.clip_by_value(grad, -1.0, 1.0), var)
                   for grad, var in gvs]
    train_step = opt.apply_gradients(clipped_gvs)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    # nb_epoch counts labeled samples used in training rather than true
    # epochs, because examples are fed one at a time (no batch computation).
    nb_epoch = 100000
    for i in tqdm(range(nb_epoch)):
        xl_, xn_, xu_, yt_ = dset.pick()
        sess.run(train_step, feed_dict={xl: xl_, xn: xn_, xu: xu_, yt: yt_})

    # Test trained model
    with sess.as_default():
        if dconf['multi-label']:
            yl_ = sess.run(yl, feed_dict={xl: dset.test['x']})
            yl_ = np.argmax(np.array(yl_), axis=1)
            yt_ = dset.test['y']
            for metric in (int_accuracy, ):
                m_name, m_value = metric(yl_, yt_)
                print(m_name, '%.6f' % m_value)
            for i in range(len(yt_)):
                print((yl_[i], yt_[i]), end='')
        else:
            for keras_metric in (int_accuracy, ):
                print(
                    keras_metric(yt, yl).eval(feed_dict={
                        xl: dset.test['x'],
                        yt: dset.test['y']
                    }))
            yp = sess.run(yl,
                          feed_dict={
                              xl: dset.test['x'],
                              yt: dset.test['y']
                          })
            for i in range(20):
                print(yp[i], dset.test['y'][i])
Example #19
def update(doc, parent, user, version, diff):
    if not diff:
        return
    models.lock_acquire(doc)
    now = time.time()
    patch = models.get(doc)
    if not patch:
        patch = []
    pre = []
    version_count = models.get('version:%s:%s' % (user, doc))
    if not version_count:
        version_count = 0
    version_max = models.get('versionmax:%s:%s' % (user, doc))
    if not version_max:
        version_max = 0
    version_time = models.get('versiontime:%s:%s' % (user, doc))
    if not version_time:
        version_time = 0
    same = []
    if parent != version_time:
        models.set('version:%s:%s' % (user, doc), 1, now + 60)
        models.set('versionmax:%s:%s' % (user, doc), version, now + 60)
        models.set('versiontime:%s:%s' % (user, doc), parent, now + 60)
        if version == 1:
            same = [(version, diff)]
        else:
            models.set('versions:%s:%s' % (user, doc), [(version, diff)],
                       now + 60)
    else:
        same = models.get('versions:%s:%s' % (user, doc))
        if not same:
            same = []
        version_count += 1
        models.set('version:%s:%s' % (user, doc), version_count, now + 60)
        if version > version_max:
            version_max = version
        models.set('versionmax:%s:%s' % (user, doc), version_max, now + 60)
        if version_count == version_max:
            same.append((version, diff))
            models.delete('versions:%s:%s' % (user, doc))
        else:
            models.set('versions:%s:%s' % (user, doc),
                       same + [(version, diff)], now + 60)
            same = []
    if not same:
        models.lock_release(doc)
        return
    same = sorted(same)
    version = same[0][0]
    for i in reversed(patch):
        if i['timestamp'] == parent or (i['user'] == user
                                        and i['version'] + 1 == version):
            break
        pre = i['diff'] + pre
    diff = []
    for i in same:
        diff += utils.forward(pre, i[1])
    version = same[-1][0]
    ret = {
        'parent': parent,
        'timestamp': now,
        'user': user,
        'version': version,
        'diff': diff
    }
    models.set(doc,
               list(filter(lambda x: x['timestamp'] >= now - 60, patch)) + [ret])
    models.set('last:%s' % doc, now)
    text = models.get('doc:%s' % doc)
    if text:
        text = text[1]
    else:
        text = ''
    text = utils.text_patch(text, diff)
    models.set('doc:%s' % doc, (now, text))
    models.lock_release(doc)
Example #20
 def objCallback(self, msg):
     self.foundObj = True
     self.extendTimeout()
     forward(0)
     self.centerOnPath(msg)
Example #21
import pygame
import pickle
import numpy as np
from game import init, iterate
from ann import NeuralNetwork
import utils

# Architecture (specify the network architecture here)
network = NeuralNetwork(layers=[7, 14, 14, 7, 1],
                        activations=['sigmoid', 'sigmoid', 'sigmoid', 'tanh'])
lr = 0.1
losses = []

screen, font = init()
# Game Loop / Train Loop
frame_count, score, _, _, x = iterate.iterate(screen, font, 0, 0)
game = True
run = True
prediction = 0
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    prediction = utils.forward(x, network)
    frame_count, score, game, run, x = iterate.iterate(screen, font,
                                                       frame_count, score,
                                                       game, run, prediction)
    loss = utils.backward(prediction, x, lr, network)
    losses.append(loss)
pygame.quit()
Example #23
            if MODE == "federated":
                client_headers["step"] = str(global_train_step)

            else:
                client_headers["step"] = str(cur_epoch)

            st = time.time()

            xs = x_train[i:i + BATCH_SIZE]
            ys = y_train[i:i + BATCH_SIZE]

            # Logits == predictions
            client_grads, loss, preds = forward(
                inputs=(xs, ys),
                model=model,
                loss_fn=tf.nn.sparse_softmax_cross_entropy_with_logits,
                training=True,
            )
            '''
            If federated, then get synchronized gradient updates from server
            
            If weightavg, then apply normal gradients. Federation occurs at end of epoch.
            
            If cyclic, then apply normal gradients. Federation occurs at end of epoch.
            
            If local, then apply normal gradients and train as usual
            
            '''

            if MODE == "federated":
                grads = federate_vals(URL, client_grads, client_headers)
Example #24
 def centerY(self, msg):
     if abs(msg.center_y) > 30:
         forward(-(msg.center_y/400))
         return False
     forward(0)
     return True
Example #25
            else:
                client_headers["step"] = str(cur_epoch)

            st = time.time()

            xs, ys = get_training_pair(
                ct_vols_train,
                mask_vols_train,
                patch_indices_train,
                batch_size=BATCH_SIZE,
            )

            # Logits == predictions
            client_grads, loss, preds = forward(
                inputs=(xs, ys),
                model=model,
                loss_fn=soft_dice_loss,
                training=True,
            )
            '''
            If federated, then get synchronized gradient updates from server
            
            If weightavg, then apply normal gradients. Federation occurs at end of epoch.
            
            If cyclic, then apply normal gradients. Federation occurs at end of epoch.
            
            If local, then apply normal gradients and train as usual
            
            '''

            if MODE == "federated":
                grads = federate_vals(URL, client_grads, client_headers)
Example #26
File: test.py Project: vohyz/xtlake
import utils
import numpy as np
layer = [
    utils.get_layer(3, 8, activation='ReLU', optimizer='Adamoptimizer', regularization=0.1),
    utils.get_layer(8, 1, activation='linear', optimizer='Adamoptimizer', regularization=0.1),
]
batch_x = [[0.5, 0.5, 0.5]]
batch_y = [[0.5]]
for i in range(10):
    layer, total_loss = utils.train(batch_x, batch_y, layer)
    print(total_loss)
    #print(layer)
utils.save_model(layer, 'model_test.npy')
layer = utils.load_model('model_test.npy')  # assumed to return the saved layers for the forward check below
utils.forward(batch_x, layer)
print(utils.last_layer(layer)['out_'])
Example #27
    c = 's'

    while True:

        if isData():
            c = sys.stdin.read(1)
            print(c)

        if c == 's':
            stop_state = True
        elif c == 'h':
            happy_state = not happy_state
            c = ''
        else:  # c == '' or any other key
            if c == 'w':
                forward(pwm, pwm)
                stop_state = False
            elif c == 'x':
                backward(pwm, pwm)
                stop_state = False
            elif c == 'a':
                spin_left(pwm, pwm)
                stop_state = False
            elif c == 'd':
                spin_right(pwm, pwm)
                stop_state = False

            elif c == 'q':
                pwm = pwm + pwm_increment if pwm <= max_speed - pwm_increment else pwm
                print("pwm: ", pwm)
            elif c == 'e':
Example #28
def predict(X, theta):
    X = np.array(X)
    X = X.reshape((1, 7))  # reshape returns a new array, so the result must be reassigned
    return forward(X, theta)
Example #29
    y_pred = []
    y_true = []

    cur_step = 1
    iterator = iter(test_dataset)
    elapsed_time = 0.0
    while True:
        try:
            batch_start_time = time.time()
            data = next(iterator)
        except StopIteration:
            break
        else:
            xs, ys = data
            loss, logits = forward(
                inputs=data,
                model=model,
                loss_fn=tf.nn.softmax_cross_entropy_with_logits,
                training=False,
            )

            preds = tf.nn.softmax(logits)

            test_loss.update_state(loss)
            test_accuracy.update_state(
                tf.argmax(ys, axis=1),
                tf.argmax(preds, axis=1),
            )

            y_true.extend(tf.argmax(ys, axis=1))
            y_pred.extend(tf.argmax(preds, axis=1))

            batch_end_time = time.time()
Example #30
def main():
    parser = argparse.ArgumentParser()
    # Training
    parser.add_argument('--adv', default=False, action='store_true')
    parser.add_argument('--easy', default=False, action='store_true')
    parser.add_argument('--num_epochs', '-N', type=int, default=100)
    parser.add_argument('--batch_size', '-B', type=int, default=128)
    parser.add_argument('--gpu_id', '-G', type=int, default=-1)
    parser.add_argument('--seed', type=int, default=820)
    # Log
    parser.add_argument('--results_dir', '-R', type=str, default='results')
    # Dataset -- X
    parser.add_argument('--X_std', type=float, default=0.04)
    parser.add_argument('--X_trn_sz', type=int, default=512 * 4)
    parser.add_argument('--X_val_sz', type=int, default=512 * 2)
    # Dataset -- Z
    parser.add_argument('--Z_std', type=float, default=1.0)
    parser.add_argument('--Z_trn_sz', type=int, default=512 * 4)
    parser.add_argument('--Z_val_sz', type=int, default=512 * 2)
    # Optimizer
    parser.add_argument('--n_gen', type=int, default=5)
    parser.add_argument('--n_dis', type=int, default=1)
    parser.add_argument('--alpha', type=float, default=1e-3)
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)
    # Models
    parser.add_argument('--in_features', '-in', type=int, default=2)
    parser.add_argument('--latent_features', '-lf', type=int, default=2)
    parser.add_argument('--noise_features', '-nf', type=int, default=2)
    parser.add_argument('--gen_num_layers', '-gnl', type=int, default=2)
    parser.add_argument('--gen_hidden_features', '-ghf', type=int, default=256)
    parser.add_argument('--gen_out_features', '-gof', default=None)
    parser.add_argument('--inf_num_layers', '-inl', type=int, default=2)
    parser.add_argument('--inf_hidden_features', '-ihf', type=int, default=256)
    parser.add_argument('--inf_out_features', '-iof', default=None)
    parser.add_argument('--dis_num_layers', '-dnl', type=int, default=2)
    parser.add_argument('--dis_hidden_features', '-dhf', type=int, default=256)
    parser.add_argument('--dis_out_features', '-dof', type=int, default=1)
    args = parser.parse_args()
    if args.gen_out_features is None:
        args.gen_out_features = args.in_features
    if args.inf_out_features is None:
        args.inf_out_features = args.latent_features

    if args.adv:
        _name = "_ALICE_toydata_unsupervised_adversarial_reconstruction"
    else:
        _name = "_ALICE_toydata_unsupervised_MSE_reconstruction"
    args.results_dir = os.path.join(
        args.results_dir,
        datetime.datetime.now().strftime('%y%m%d-%H%M%S') + _name)
    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    with open(os.path.join(args.results_dir, 'args.json'), 'w') as f:
        json.dump(args.__dict__, f, indent=4)

    if args.gpu_id > -1:
        if torch.cuda.is_available():
            torch.cuda.set_device(args.gpu_id)
    torch.cuda.manual_seed_all(args.seed)
    device = torch.device(
        'cuda' if args.gpu_id > -1 and torch.cuda.is_available() else 'cpu')

    # Prepare dataset X
    rt2 = math.sqrt(2)
    means = {
        'easy': [[0, 0], [5, 5], [-5, 5], [-5, -5], [5, -5]],
        'difficult': [[5 * rt2, 0], [5, 5], [0, 5 * rt2], [-5, 5],
                      [-5 * rt2, 0], [-5, -5], [0, -5 * rt2], [5, -5]]
    }
    key = 'easy' if args.easy else 'difficult'
    means_x = list(
        map(lambda x: torch.tensor(x, dtype=torch.float), means[key]))
    variances_x = [torch.eye(2) * args.X_std for _ in means_x]
    x_trn = gmm_module.GMMData(args.X_trn_sz,
                               means_x,
                               variances_x,
                               seed=args.seed)
    x_trn_loader = torch.utils.data.DataLoader(
        x_trn,
        args.batch_size,
        pin_memory=(args.gpu_id > -1 and torch.cuda.is_available()))
    # Prepare dataset Z
    means_z = list(map(lambda x: torch.tensor(x, dtype=torch.float), [[0, 0]]))
    variances_z = [torch.eye(2) * args.Z_std for _ in means_z]
    z_trn = gmm_module.GMMData(args.Z_trn_sz,
                               means_z,
                               variances_z,
                               seed=args.seed)
    z_trn_loader = torch.utils.data.DataLoader(
        z_trn,
        args.batch_size,
        pin_memory=(args.gpu_id > -1 and torch.cuda.is_available()))
    # Prepare models
    gen = Generator(args.gen_num_layers, args.in_features, args.noise_features,
                    args.gen_hidden_features, args.gen_out_features).to(device)
    inf = Inference(args.gen_num_layers, args.in_features, args.noise_features,
                    args.inf_hidden_features, args.inf_out_features).to(device)
    dis = Discriminator(args.gen_num_layers, args.in_features,
                        args.noise_features, args.dis_hidden_features,
                        args.dis_out_features).to(device)
    if args.adv:
        dis_x = DiscriminatorXX(args.gen_num_layers, args.in_features,
                                args.noise_features, args.dis_hidden_features,
                                args.dis_out_features).to(device)
        dis_z = DiscriminatorZZ(args.gen_num_layers, args.in_features,
                                args.noise_features, args.dis_hidden_features,
                                args.dis_out_features).to(device)
    else:
        dis_x, dis_z = None, None
    opt_gen_inf = torch.optim.Adam(
        list(gen.parameters()) + list(inf.parameters()), args.alpha,
        (args.beta1, args.beta2))
    _params = list(dis.parameters())
    if args.adv:
        _params += list(dis_x.parameters()) + list(dis_z.parameters())
    opt_dis = torch.optim.Adam(_params, args.alpha, (args.beta1, args.beta2))

    # Save figures
    x_gmm_samples = x_trn.samples.numpy()
    z_gmm_samples = z_trn.samples.numpy()
    figure, ax = plt.subplots(1, 1, figsize=(4.5, 4.5))
    ax.scatter(x_gmm_samples[:, 0],
               x_gmm_samples[:, 1],
               label='X',
               marker='.',
               alpha=0.3,
               c=matplotlib.cm.Set1(x_trn.labels.numpy().reshape(
                   (-1, )) / args.in_features / 2.0))
    ax.scatter(z_gmm_samples[:, 0],
               z_gmm_samples[:, 1],
               label='Z',
               marker='.',
               alpha=0.1)
    ax.set_xlim(-10, 10)
    ax.set_ylim(-10, 10)
    plt.legend()
    plt.savefig(os.path.join(args.results_dir, 'dataset.png'))
    plt.close('all')
    torch.save(x_trn, os.path.join(args.results_dir, 'x_trn.pkl'))
    torch.save(z_trn, os.path.join(args.results_dir, 'z_trn.pkl'))

    for epoch in tqdm.tqdm(range(args.num_epochs)):
        _mom, epoch_dis_loss, epoch_gen_loss = .9, 0, 0
        for i, (x_batch, z_batch) in tqdm.tqdm(
                enumerate(zip(x_trn_loader, z_trn_loader))):
            x, *_ = x_batch
            z, *_ = z_batch
            x, z = x.to(device), z.to(device)

            iter_dis_loss = .0
            for j in range(args.n_dis):
                dis_loss_opt, _ = forward(device, args.adv, x, z, gen, inf,
                                          dis, dis_x, dis_z, False)
                opt_dis.zero_grad()
                dis_loss_opt.backward()
                opt_dis.step()
                iter_dis_loss += dis_loss_opt.item() / args.n_dis
            epoch_dis_loss = epoch_dis_loss * (1 - _mom) + iter_dis_loss * _mom

            iter_gen_loss, iter_x, iter_z = .0, .0, .0
            for j in range(args.n_gen):
                _, gen_loss_opt, cost_x, cost_z = forward(
                    device, args.adv, x, z, gen, inf, dis, dis_x, dis_z, True)
                opt_gen_inf.zero_grad()
                gen_loss_opt.backward()
                opt_gen_inf.step()
                iter_gen_loss += gen_loss_opt.item() / args.n_gen
                iter_x += cost_x.item() / args.n_gen
                iter_z += cost_z.item() / args.n_gen
            epoch_gen_loss = epoch_gen_loss * (1 - _mom) + iter_gen_loss * _mom

            if (i + 1) % 8 == 0:
                _fmt = "epoch {}/{}, iter {}, dis: {:.05f}, gen: {:.05f}, x: {:.05f}, z: {:.05f}"
                tqdm.tqdm.write(
                    _fmt.format(epoch, args.num_epochs, i + 1, iter_dis_loss,
                                iter_gen_loss, iter_x, iter_z))

        if (epoch + 1) % 10 == 0:
            gen.eval()
            inf.eval()
            x_trn_samples = x_trn.samples.to(device)
            z_trn_samples = z_trn.samples.to(device)
            with torch.no_grad():
                p_x = gen(z_trn_samples)
                q_z = inf(x_trn_samples)
                x_rec = gen(q_z).cpu().numpy()
                z_rec = inf(p_x).cpu().numpy()
            tqdm.tqdm.write('Epoch {}/{}, z_rec| mean: {}, var: {}'.format(
                epoch + 1, args.num_epochs, z_rec.mean(axis=0),
                z_rec.var(axis=0, ddof=1)))
            # Save a scatter plot of the reconstructions.
            figure, ax = plt.subplots(1, 1, figsize=(4.5, 4.5))
            ax.scatter(x_rec[:, 0],
                       x_rec[:, 1],
                       label='X_reconstructed',
                       marker='.',
                       alpha=0.3)
            ax.scatter(z_rec[:, 0],
                       z_rec[:, 1],
                       label='Z_reconstructed',
                       marker='.',
                       alpha=0.1)
            ax.set_xlim(-10, 10)
            ax.set_ylim(-10, 10)
            plt.legend()
            plt.savefig(
                os.path.join(args.results_dir,
                             'reconstructed_{}.png'.format(epoch + 1)))
            plt.close('all')
    _ckpt = {
        'opt_gen_inf': opt_gen_inf.state_dict(),
        'opt_dis': opt_dis.state_dict(),
        'gen': gen.state_dict(),
        'inf': inf.state_dict(),
        'dis': dis.state_dict(),
    }
    if args.adv:
        _ckpt.update({
            'dis_x': dis_x.state_dict(),
            'dis_z': dis_z.state_dict()
        })
    torch.save(_ckpt, os.path.join(args.results_dir, 'ckpt.pth.tar'))
Example #31
 def test_forward(self):
     x = np.array([[1, 2, 3], [4, 5, 6]])
     w = np.array([[1, 0, -1], [0, 1, 1], [1, -1, 0]])
     b = np.array([[1, 3, 2]])
     y = utils.forward(x, w, b)
     self.assertTrue(np.allclose(y, [[5, 2, 3], [11, 2, 3]]))
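
Note: this test pins down the semantics of utils.forward(x, w, b): the expected output equals the matrix product of x and w plus the bias b. An implementation consistent with the test (whether the real one applies an activation elsewhere is not visible here) is simply:

import numpy as np

def forward(x, w, b):
    # Affine map: matrix-multiply the inputs by the weights and add the bias.
    return np.dot(x, w) + b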
Example #32
 def execute(self, userdata):
     turn(0)
     dive(0)
     forward(0)
     rospy.loginfo("motors should be reset now...")
     return super(Idle, self).execute(userdata)
Example #33
    def generator(self):
        f = 7
        p = (f - 1) // 2  # integer padding size (floor division keeps this an int in Python 3)
        gen = [
            PaddingLayer(p, "reflect"),
            ConvLayer(num_filters=32,
                      kernel_size=7,
                      stride=1,
                      padding="VALID",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=self.normalizer,
                      activation=tf.nn.relu),
            ConvLayer(num_filters=64,
                      kernel_size=3,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=self.normalizer,
                      activation=tf.nn.relu),
            ConvLayer(num_filters=128,
                      kernel_size=3,
                      stride=2,
                      padding="SAME",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=self.normalizer,
                      activation=tf.nn.relu)
        ]

        for i in range(self.n_res_blocks):
            gen.append(
                ResBlock(num_filters=128,
                         kernel_size=3,
                         stride=1,
                         padding_size=1,
                         padding_type="reflect",
                         weights_init=tf.truncated_normal_initializer(
                             stddev=self.weight_stdev),
                         normalizer=self.normalizer,
                         activation=tf.nn.relu))
        gen2 = [
            ConvTransposeLayer(num_outputs=64,
                               kernel_size=3,
                               stride=2,
                               padding="SAME",
                               normalizer=self.normalizer,
                               activation=tf.nn.relu),
            ConvTransposeLayer(num_outputs=32,
                               kernel_size=3,
                               stride=2,
                               padding="SAME",
                               normalizer=self.normalizer,
                               activation=tf.nn.relu),
            PaddingLayer(p, "reflect"),
            ConvLayer(num_filters=3,
                      kernel_size=7,
                      stride=1,
                      padding="VALID",
                      weights_init=tf.truncated_normal_initializer(
                          stddev=self.weight_stdev),
                      normalizer=self.normalizer,
                      activation=tf.tanh)
        ]
        gen = gen + gen2
        return forward(gen)