Example no. 1
0
    def __init__(self, model_type, model_size, learning_rate=0.001):
        """Build the TF1 graph for the power-forecasting regression model.

        Args:
            model_type: one of "lstm", "ltc" (optionally suffixed "_rk"/"_ex"
                to select the ODE solver), "node", "ctgru", "ctrnn".
            model_size: number of hidden units in the recurrent cell.
            learning_rate: Adam step size; overridden to 0.01 for LTC models.

        Raises:
            ValueError: if model_type is not recognized.
        """
        self.model_type = model_type
        self.constrain_op = None  # set only for LTC models (parameter-constraint op)
        # Inputs are time-major: [time, batch, features]; 6 input features,
        # 1 regression target per time step.
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, None, 6])
        self.target_y = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])

        self.model_size = model_size
        head = self.x
        if model_type == "lstm":
            self.fused_cell = tf.nn.rnn_cell.LSTMCell(model_size)

            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type.startswith("ltc"):
            learning_rate = 0.01  # LTC needs a higher learning rate
            self.wm = ltc.LTCCell(model_size)
            # Pick the ODE solver from the model-type suffix.
            if model_type.endswith("_rk"):
                self.wm._solver = ltc.ODESolver.RungeKutta
            elif model_type.endswith("_ex"):
                self.wm._solver = ltc.ODESolver.Explicit
            else:
                self.wm._solver = ltc.ODESolver.SemiImplicit

            head, _ = tf.nn.dynamic_rnn(self.wm, head, dtype=tf.float32, time_major=True)
            self.constrain_op = self.wm.get_param_constrain_op()
        elif model_type == "node":
            # NOTE(review): cell_clip=10 here, while the sibling harnesses use
            # -1 (disabled) — presumably dataset-specific; confirm before unifying.
            self.fused_cell = NODE(model_size, cell_clip=10)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type == "ctgru":
            self.fused_cell = CTGRU(model_size, cell_clip=-1)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type == "ctrnn":
            self.fused_cell = CTRNN(model_size, cell_clip=-1, global_feedback=True)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        else:
            raise ValueError("Unknown model type '{}'".format(model_type))

        # Linear readout to one scalar per time step.
        self.y = tf.layers.Dense(1, activation=None, kernel_initializer=tf.keras.initializers.TruncatedNormal())(head)
        print("logit shape: ", str(self.y.shape))
        self.loss = tf.reduce_mean(tf.square(self.target_y - self.y))
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train_step = optimizer.minimize(self.loss)

        # Mean absolute error, exposed under the generic "accuracy" name.
        self.accuracy = tf.reduce_mean(tf.abs(self.target_y - self.y))

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(os.path.join("results", "power"), exist_ok=True)
        self.result_file = os.path.join("results", "power", "{}_{}.csv".format(model_type, model_size))
        if not os.path.isfile(self.result_file):
            with open(self.result_file, "w") as f:
                f.write("best epoch, train loss, train mae, valid loss, valid mae, test loss, test mae\n")

        os.makedirs(os.path.join("tf_sessions", "power"), exist_ok=True)
        self.checkpoint_path = os.path.join("tf_sessions", "power", "{}".format(model_type))

        self.saver = tf.train.Saver()
Example no. 2
0
    def __init__(self,
                 model_type,
                 model_size,
                 sparsity_level=0.0,
                 learning_rate=0.001):
        """Build the TF1 graph for the ozone binary-classification model.

        Args:
            model_type: one of "lstm", "ltc" (optionally suffixed "_rk"/"_ex"
                to select the ODE solver), "node", "ctgru", "ctrnn".
            model_size: number of hidden units in the recurrent cell.
            sparsity_level: fraction in [0, 1); when > 0, sparsification ops
                are appended to constrain_op.
            learning_rate: Adam step size; overridden to 0.01 for LTC models.

        Raises:
            ValueError: if model_type is not recognized.
        """
        self.model_type = model_type
        self.constrain_op = []  # ops run after each train step (LTC / sparsity)
        self.sparsity_level = sparsity_level

        # Inputs are time-major: [time, batch, features]; 72 input features,
        # integer class labels per time step.
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, None, 72])
        self.target_y = tf.placeholder(dtype=tf.int64, shape=[None, None])

        self.model_size = model_size
        head = self.x
        if model_type == "lstm":
            self.fused_cell = tf.nn.rnn_cell.LSTMCell(model_size)

            head, _ = tf.nn.dynamic_rnn(self.fused_cell,
                                        head,
                                        dtype=tf.float32,
                                        time_major=True)
        elif model_type.startswith("ltc"):
            learning_rate = 0.01  # LTC needs a higher learning rate
            self.wm = ltc.LTCCell(model_size)
            # Pick the ODE solver from the model-type suffix.
            if model_type.endswith("_rk"):
                self.wm._solver = ltc.ODESolver.RungeKutta
            elif model_type.endswith("_ex"):
                self.wm._solver = ltc.ODESolver.Explicit
            else:
                self.wm._solver = ltc.ODESolver.SemiImplicit

            head, _ = tf.nn.dynamic_rnn(self.wm,
                                        head,
                                        dtype=tf.float32,
                                        time_major=True)
            self.constrain_op.extend(self.wm.get_param_constrain_op())
        elif model_type == "node":
            self.fused_cell = NODE(model_size, cell_clip=-1)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell,
                                        head,
                                        dtype=tf.float32,
                                        time_major=True)
        elif model_type == "ctgru":
            self.fused_cell = CTGRU(model_size, cell_clip=-1)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell,
                                        head,
                                        dtype=tf.float32,
                                        time_major=True)
        elif model_type == "ctrnn":
            self.fused_cell = CTRNN(model_size,
                                    cell_clip=-1,
                                    global_feedback=True)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell,
                                        head,
                                        dtype=tf.float32,
                                        time_major=True)
        else:
            raise ValueError("Unknown model type '{}'".format(model_type))

        self._debug_list_sparse_vars = []
        if self.sparsity_level > 0:
            self.constrain_op.extend(self.get_sparsity_ops())

        # Two-way logits per time step.
        self.y = tf.layers.Dense(2, activation=None)(head)
        print("logit shape: ", str(self.y.shape))
        # Up-weight positive labels (class imbalance): weight 1.6 for label 1,
        # 0.1 for label 0.
        weight = tf.cast(self.target_y, dtype=tf.float32) * 1.5 + 0.1
        self.loss = tf.losses.sparse_softmax_cross_entropy(
            labels=self.target_y, logits=self.y, weights=weight)
        print("loss shape: ", str(self.loss.shape))
        self.loss = tf.reduce_mean(self.loss)

        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train_step = optimizer.minimize(self.loss)

        model_prediction = tf.argmax(input=self.y, axis=2)

        lab = tf.cast(self.target_y, dtype=tf.float32)
        pred = tf.cast(model_prediction, dtype=tf.float32)

        # True/False positives/negatives
        tp = tf.reduce_sum(lab * pred)
        tn = tf.reduce_sum((1 - lab) * (1 - pred))
        fp = tf.reduce_sum((1 - lab) * (pred))
        fn = tf.reduce_sum((lab) * (1 - pred))

        # Epsilon terms guard against division by zero.
        # NOTE(review): the F1 epsilon (1e-6) differs from the precision/recall
        # epsilon (1e-5) — presumably unintentional; confirm before unifying.
        self.prec = tp / (tp + fp + 0.00001)
        self.recall = tp / (tp + fn + 0.00001)
        # F1-score (harmonic mean of precision and recall), exposed as accuracy.
        self.accuracy = 2 * (self.prec * self.recall) / (
            self.prec + self.recall + 0.000001)

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(os.path.join("results", "ozone"), exist_ok=True)
        self.result_file = os.path.join(
            "results", "ozone",
            "{}_{}_{:02d}.csv".format(model_type, model_size,
                                      int(100 * self.sparsity_level)))
        if not os.path.isfile(self.result_file):
            with open(self.result_file, "w") as f:
                f.write(
                    "best epoch, train loss, train acc, valid loss, valid acc, test loss, test acc\n"
                )

        os.makedirs(os.path.join("tf_sessions", "ozone"), exist_ok=True)
        self.checkpoint_path = os.path.join("tf_sessions", "ozone",
                                            "{}".format(model_type))

        self.saver = tf.train.Saver()
    def __init__(self, model_type, model_size, sparsity_level=0.0, learning_rate=0.001):
        """Build the TF1 graph for the occupancy binary-classification model.

        Args:
            model_type: one of "lstm", "ltc" (optionally suffixed "_rk"/"_ex"
                to select the ODE solver), "node", "ctgru", "ctrnn".
            model_size: number of hidden units in the recurrent cell.
            sparsity_level: fraction in [0, 1); when > 0, sparsification ops
                are appended to constrain_op.
            learning_rate: Adam step size; overridden to 0.005 for LTC models.

        Raises:
            ValueError: if model_type is not recognized.
        """
        self.model_type = model_type
        self.constrain_op = []  # ops run after each train step (LTC / sparsity)
        self.sparsity_level = sparsity_level
        # Inputs are time-major: [time, batch, features]; 5 input features,
        # integer class labels per time step.
        self.x = tf.placeholder(dtype=tf.float32, shape=[None, None, 5])
        self.target_y = tf.placeholder(dtype=tf.int32, shape=[None, None])

        self.model_size = model_size
        head = self.x
        if model_type == "lstm":
            self.fused_cell = tf.nn.rnn_cell.LSTMCell(model_size)

            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type.startswith("ltc"):
            learning_rate = 0.005  # LTC needs a higher learning rate
            self.wm = ltc.LTCCell(model_size)
            # Pick the ODE solver from the model-type suffix.
            if model_type.endswith("_rk"):
                self.wm._solver = ltc.ODESolver.RungeKutta
            elif model_type.endswith("_ex"):
                self.wm._solver = ltc.ODESolver.Explicit
            else:
                self.wm._solver = ltc.ODESolver.SemiImplicit

            head, _ = tf.nn.dynamic_rnn(self.wm, head, dtype=tf.float32, time_major=True)
            self.constrain_op.extend(self.wm.get_param_constrain_op())
        elif model_type == "node":
            self.fused_cell = NODE(model_size, cell_clip=-1)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type == "ctgru":
            self.fused_cell = CTGRU(model_size, cell_clip=-1)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        elif model_type == "ctrnn":
            self.fused_cell = CTRNN(model_size, cell_clip=-1, global_feedback=True)
            head, _ = tf.nn.dynamic_rnn(self.fused_cell, head, dtype=tf.float32, time_major=True)
        else:
            raise ValueError("Unknown model type '{}'".format(model_type))

        if self.sparsity_level > 0:
            self.constrain_op.extend(self.get_sparsity_ops())

        # Two-way logits per time step.
        self.y = tf.layers.Dense(2, activation=None)(head)
        print("logit shape: ", str(self.y.shape))
        self.loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(
            labels=self.target_y,
            logits=self.y,
        ))
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train_step = optimizer.minimize(self.loss)

        # Per-time-step classification accuracy.
        model_prediction = tf.argmax(input=self.y, axis=2)
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(model_prediction, tf.cast(self.target_y, tf.int64)), tf.float32))

        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())

        # exist_ok avoids the check-then-create race of exists() + makedirs().
        os.makedirs(os.path.join("results", "occupancy"), exist_ok=True)
        self.result_file = os.path.join("results", "occupancy", "{}_{}_{:02d}.csv".format(model_type, model_size, int(100 * self.sparsity_level)))
        if not os.path.isfile(self.result_file):
            with open(self.result_file, "w") as f:
                f.write("best epoch, train loss, train accuracy, valid loss, valid accuracy, test loss, test accuracy\n")

        os.makedirs(os.path.join("tf_sessions", "occupancy"), exist_ok=True)
        self.checkpoint_path = os.path.join("tf_sessions", "occupancy", "{}".format(model_type))

        self.saver = tf.train.Saver()