Example #1
    #----------Initialize----------
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.reset_default_graph()
    #    optimizer1 = tf.compat.v1.train.GradientDescentOptimizer(0.001)
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.01)
    #    optimizer2 = tf.compat.v1.train.GradientDescentOptimizer(0.0005)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    folded_cascode = Folded_Cascode()
    classab = ClassAB()

    folded_cascode_spice = Folded_Cascode_spice()
    classab_spice = ClassAB_spice()

    sxin = make_var(
        "amplifier", "fc_classab", (1, 28),
        tf.random_uniform_initializer(-np.ones((1, 28)), np.ones((1, 28))))
    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================
    hardcost, usercost, tf_specs, tf_param, tf_metric, tf_mid, tf_const = graph_tf(
        sxin, folded_cascode, classab)

    opt1 = optimizer1.minimize(hardcost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1

    lastvalue = -1000000
    lst_params = []
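
Every one of these setup excerpts stops right before the actual optimization loop (the `calc`, `lastvalue`, and `lst_params` bookkeeping hints at it). Below is a minimal sketch of that loop under the usual TF1 session pattern, using the names from Example #1; the step budget and the switch-over point between the two optimizers are assumptions, not taken from the source.

    # Hypothetical session loop: the step budget and phase switch are assumptions.
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        for step in range(500):
            # coarse phase with opt1, then fine-tune with the lower-rate opt2
            _, cost = sess.run([opt1 if step < 250 else opt2, hardcost])
            if -cost > lastvalue:
                # remember the best design point seen so far
                lastvalue = -cost
                lst_params = sess.run(tf_param)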
Example #2
    #==================================================================
    #*****************  Building the graph  ***************************
    #==================================================================

    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.01)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    lna1 = LNA(tech=28)
    sh1 = SH(tech=28)

    sxin = make_var(
        "RF_FE", "LNA", (1, 9),
        tf.random_uniform_initializer(-np.ones((1, 9)), np.ones((1, 9))))
    hardcost, usercost, tf_specs, tf_params, tf_metrics, tf_const = graph_tf(
        sxin, lna1, sh1)

    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================

    opt1 = optimizer1.minimize(usercost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1
    lastvalue = -1000000
    lst_params = []
Example #3
    optimizer1 = tf.compat.v1.train.GradientDescentOptimizer(0.01)
    optimizer2 = tf.compat.v1.train.GradientDescentOptimizer(0.001)

    # ----------load regressors----------
    seqp11 = SEQ1()
    seqp21 = SEQ2()
    compp1 = COMPP2()
    thdac1 = THDAC2()

    comppspice1 = Compp_spice2()
    dacthspice1 = DACTH2_spice()
    seqp1spice1 = Seqpart1_spice()
    seqp2spice1 = Seqpart2_spice()

    var_in = make_var(
        "SAR_ADC", "SEQ_COMPP_THDAC", (1, 26),
        tf.random_uniform_initializer(-np.ones((1, 26)), np.ones((1, 26))))

    xload = tf.compat.v1.placeholder(tf.float32, shape=(1, 26))
    initvar = var_in.assign(xload)

    sxin = 2 * tf.math.sigmoid(var_in) - 1.0

    hardcost, softcost, tf_specs, tf_params, tf_metrics, tf_mids, tf_const = graph_tf3(
        sxin, seqp11, seqp21, compp1, thdac1)

    # ==================================================================
    # ********************  Tensorflow Initiation  *********************
    # ==================================================================
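
Example #3 departs from the others in two ways: the raw variable `var_in` is squashed through `2 * sigmoid(var_in) - 1.0` so the regressor inputs always stay inside (-1, 1), and the `xload` placeholder together with `initvar = var_in.assign(xload)` allows overwriting the random initialization with a known starting point. A minimal sketch of that warm start, where `x0` is a hypothetical previously saved design point:

    # Hypothetical warm start; x0 stands in for a saved (1, 26) design point.
    x0 = np.zeros((1, 26), dtype=np.float32)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(initvar, feed_dict={xload: x0})  # replace the random init
        # var_in now holds x0; sxin = 2*sigmoid(var_in) - 1 stays in (-1, 1)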
Example #4
    #*****************  Building the graph  ***************************
    #==================================================================
    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()
    #    optimizer1 = tf.train.GradientDescentOptimizer(0.001)
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.001)
    #    optimizer2 = tf.train.GradientDescentOptimizer(0.0005)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    # Instance names are kept distinct from the class names so the classes
    # remain callable.
    vcdl1 = vcdl()
    dll1 = dll()

    sxin = make_var("vcdl", "dll", (1, 10),
                    tf.random_uniform_initializer(-np.ones((1, 10)),
                                                  np.ones((1, 10))))
    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================
    hardcost, usercost, tf_specs, tf_param, tf_metric, tf_mid, tf_const = graph_tf(
        sxin, vcdl1, dll1)

    opt1 = optimizer1.minimize(hardcost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1

    lastvalue = -1000000
    lst_params = []
Example #5
    #*****************  Building the graph  ***************************
    #==================================================================
    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()
    #    optimizer1 = tf.train.GradientDescentOptimizer(0.001)
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.001)
    #    optimizer2 = tf.train.GradientDescentOptimizer(0.0005)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    # Instance names are kept distinct from the class names so the classes
    # remain callable.
    cs_driver_cml1 = cs_driver_cml()
    cs_array_8b1 = cs_array_8b()

    sxin = make_var("cs_driver_cml", "cs_array_8b", (1, 9),
                    tf.random_uniform_initializer(-np.ones((1, 9)),
                                                  np.ones((1, 9))))
    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================
    hardcost, usercost, tf_specs, tf_param, tf_metric, tf_mid, tf_const = graph_tf(
        sxin, cs_driver_cml1, cs_array_8b1)

    opt1 = optimizer1.minimize(hardcost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1

    lastvalue = -1000000
    lst_params = []
Example #6
    #*****************  Building the graph  ***************************
    #==================================================================
    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()
    #    optimizer1 = tf.train.GradientDescentOptimizer(0.001)
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.001)
    #    optimizer2 = tf.train.GradientDescentOptimizer(0.0005)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    dtc1 = DTC1()
    dtc2 = DTC2()

    sxin = make_var(
        "DTC1", "DTC2", (1, 4),
        tf.random_uniform_initializer(-np.ones((1, 4)), np.ones((1, 4))))
    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================
    hardcost, usercost, tf_specs, tf_param, tf_metric, tf_mid, tf_const = graph_tf(
        sxin, dtc1, dtc2)

    opt1 = optimizer1.minimize(hardcost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1

    lastvalue = -1000000
    lst_params = []
Example #7
    #==================================================================
    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()

    # Define two optimizers. The value in parentheses is the learning rate, which you can change.
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.001)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------

    # Create an object of the INV class
    inv = INV()
    # Initialize all design parameters with random values. There are 2 design parameters in total for this single module.
    sxin = make_var(
        "INV", "INV", (1, 2),
        tf.random_uniform_initializer(-np.ones((1, 2)), np.ones((1, 2))))
    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================
    hardcost, usercost, tf_specs, tf_param, tf_metric, tf_mid, tf_const = graph_tf(
        sxin, inv)

    # optimizer1 minimizes hardcost and optimizer2 minimizes usercost, both defined above.
    opt1 = optimizer1.minimize(hardcost)
    opt2 = optimizer2.minimize(usercost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1

    lastvalue = -1000000
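
What follows a setup like this is running the two optimizers and reading the sized design back out of the graph. A hedged sketch using the tensors defined above; the step counts and the print formatting are illustrative, not from the source.

    # Illustrative run: optimize, then fetch the achieved specs and parameters.
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        for _ in range(300):   # constraint phase; the count is an assumption
            sess.run(opt1)     # minimize hardcost
        for _ in range(100):   # objective phase; the count is an assumption
            sess.run(opt2)     # minimize usercost
        specs, params = sess.run([tf_specs, tf_param])
        print("achieved specs:", specs)
        print("design parameters:", params)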
Example #8
    tf.compat.v1.disable_eager_execution()
    #----------Initialize----------
    tf.compat.v1.reset_default_graph()
    optimizer1 = tf.compat.v1.train.AdamOptimizer(0.01)
    optimizer2 = tf.compat.v1.train.AdamOptimizer(0.001)

    #----------load regressors----------
    vco1 = VCO(tech=65)
    inbuf2 = INBUF2(tech=65)
    #--------load spice netlists--------
    vcospice1 = VCOSpice()
    inbufspice2 = INBUF2Spice()

    var_in = make_var(
        "VCO_ADC", "BUF_VCO", (1, 12),
        tf.random_uniform_initializer(-np.ones((1, 12)), np.ones((1, 12))))

    hardcost, usercost, tf_specs, tf_params, tf_metrics, tf_mids, tf_const = graph_tf2(
        var_in, vco1, inbuf2)

    #==================================================================
    #********************  Tensorflow Initiation  *********************
    #==================================================================

    opt1 = optimizer1.minimize(usercost)
    opt2 = optimizer2.minimize(hardcost)
    init = tf.compat.v1.global_variables_initializer()

    calc = 1