Example #1
from random import randint  # randint drives the chord-history window below

def generate_song(filename):
	# Generate data
	delta = get_delta(filename)
	delta = list(map(int, delta))  # materialize: map() is lazy in Python 3
	delta, count = get_duration(delta)
	# Set first chord
	song = ['1']
	# Generate rest of song
	for datapoint in delta:
		song.append(get_best_chord(datapoint, song[-randint(1, 4):]))
		# Occasionally resolve a two-digit chord token ending in '7'
		# (a dominant seventh) back to the opening chord.
		if song[-1].isdigit() and song[-1][-1] == '7' and len(song[-1]) == 2 and randint(0, 10) > 5:
			song[-1] = song[0]
		print(song[-1])
	return song, count
Example #2
import logging
from typing import List

import numpy as np

# utils, Hierarchy and LinearProgram come from the surrounding project;
# the run_logs/ directory must already exist for the log file to open.

def run(S: List, save_file_name: str, c: float = 1, d: int = 1):
    logging.basicConfig(filename='run_logs/' + save_file_name + '.log',
                        format='%(asctime)s - %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.INFO)
    logging.info('Entered debug mode.')
    distances = utils.calc_euclidean_distances(np.array(S))
    delta = utils.get_delta(distances)
    t = int(np.ceil(np.log2(1 / delta)))

    logging.info('Points:\n{}'.format(str(S)))
    # logging.info('Distances:\n{}'.format(str(distances)))
    # logging.info('Minimal distance (delta): {}'.format(str(delta)))
    logging.info('t: {}'.format(str(t)))
    # print(str(arr_scaler(distances)))

    logging.info('Building hierarchy.')
    h = Hierarchy(S, distances, c, t)
    logging.info('Done.')

    logging.info('Building LP.')
    lp = LinearProgram(h, d, delta, save_file_name)
    logging.info('Done.')

    logging.info('Solving.')
    lp.solve()
    logging.info('Done.')

    w_hierarchy = []
    selected = [var for var in lp.model.variables()
                if var.name.startswith('z') and var.value() == 1]
    for var in selected:
        split = var.name.split('_')
        i = int(split[1][1:])
        j = int(split[2][1:])
        while len(w_hierarchy) <= i:
            w_hierarchy.append([])
        w_hierarchy[i].append(j)
    w_hierarchy = [sorted(w) for w in w_hierarchy]

    W = [S[w] for w in w_hierarchy[-1]]

    logging.info(f"W hierarchy: {w_hierarchy}")
    logging.info(f"Recall S: {S}")
    S_np = np.array(S)
    logging.info(f"W: {W}")
    W_np = np.array(W)
    if W == S:
        print("(W == S)")

    return W
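For context, t above is just the number of hierarchy levels implied by the minimal pairwise distance delta. A minimal numeric sketch of that one line (the surrounding helpers are project-specific, so this recomputes t directly):

import numpy as np

delta = 0.01  # toy minimal distance
t = int(np.ceil(np.log2(1 / delta)))
print(t)  # ceil(log2(100)) == 7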
Example #3
import math
import pickle
import socket
import sys
import time
from multiprocessing import Value

# IMU_Process, Stepper_Process, GPS_Process, read_restaurants and get_delta
# come from the surrounding project.

def main():
    if len(sys.argv) > 1:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((sys.argv[1], 15975))

    restaurants = read_restaurants()

    yaw = Value('d')
    imu_proc = IMU_Process(yaw)
    imu_proc.start()

    # Stepper is Red Blue White Yellow from top to bottom (towards knob) Dir = LOW means clockwise
    target_angle = Value('d')
    stepper_proc = Stepper_Process(target_angle, 15, 18, 23, 25, 200)
    stepper_proc.start()

    lat, lon = Value('d'), Value('d')
    gps_proc = GPS_Process(lat, lon)
    gps_proc.start()

    print_state = False
    while True:
        bearing = yaw.value
        delta, closest, dist = get_delta(bearing, restaurants,
                                         (lat.value, lon.value))
        target_angle.value = delta

        # Throttle output to roughly once per second: floor(time % 2) alternates 0/1.
        if math.floor(time.time() % 2) != print_state:
            data = (closest.name, dist, bearing, delta, closest.lat,
                    closest.lon, lat.value, lon.value)

            if len(sys.argv) > 1:
                s.send(pickle.dumps(data))
            else:
                print(
                    'Closest: {}, Dist: {:.2f}\n\tBearing: {:.0f}, Delta: {:.0f}\n\tClosest: {},{}\n\tLoc: {},{}'
                    .format(*data))

            print_state = not print_state
        time.sleep(0.005)
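The get_delta used above is not shown. A plausible, self-contained sketch of the signed shortest-rotation computation such a helper would need (an assumption about its internals, not the project's code):

def signed_angle_delta(bearing, target):
    """Shortest signed rotation in degrees from bearing to target, in [-180, 180)."""
    return (target - bearing + 180.0) % 360.0 - 180.0

print(signed_angle_delta(350.0, 10.0))  # 20.0: turn 20 degrees clockwise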
Example #4
    def update_branch(self):
        """Updating a branch in a project.

        This function gets project name, the branch we updating, file content and type from the client,
        then getting the delta for the new version, saving the delta to a new file in the project folder.
        """
        isTxt = False
        isPng = False
        size = self.clientsock.recv(self.BUFSIZ)
        self.clientsock.send("Size Gotten")
        proInfo = self.clientsock.recv(self.BUFSIZ)
        self.clientsock.send("Info Gotten")
        extension = self.clientsock.recv(self.BUFSIZ)
        self.clientsock.send("Extension Gotten")
        size = int(size)
        current_Size = 0
        buffer = b""
        while current_Size < size:
            data = self.clientsock.recv(1024)
            if not data:
                break
            if len(data) + current_Size > size:
                data = data[:size - current_Size]
            buffer += data
            current_Size += len(data)
        self.clientsock.send("Content Gotten")
        proName = proInfo.split('^')[0]
        branchVer = proInfo.split('^')[1]
        proPath = self.PATH + "\\Projects\\" + proName + "\\"
        filesInDir = os.listdir(proPath)
        filesInDir.sort()
        if extension == "png":
            isPng = True
        oldVerContent = ""
        for file in filesInDir:
            if file.split('.')[0] == branchVer.split('.')[0] + "_1":
                with open(proPath + file, 'rb') as branchFile:
                    oldVerContent = branchFile.read()
                if file.split('.')[1] == "txt":
                    isTxt = True
                if file.split('.')[1] == "png":
                    isPng = True
        if not isPng:
            for x in range(2, int(branchVer.split('.')[1]) + 1):
                for file in filesInDir:
                    if file.split('.')[0] == branchVer.split('.')[0] + "_" + str(x):
                        with open(proPath + file, 'rb') as oldBranchFile:
                            oldVerContent = utils.restore_delta(
                                oldVerContent, oldBranchFile.read(), isTxt)
                        if file.split('.')[1] == "txt":
                            isTxt = True
                        if file.split('.')[1] == "png":
                            isPng = True
            if extension == "txt":
                isTxt = True
            if not isPng:
                newBranchContent = utils.get_delta(oldVerContent, str(buffer),
                                                   isTxt)
                branchVer = branchVer.split('.')[0] + "_" + str(
                    int(branchVer.split('.')[1]) + 1)
                proPath = self.PATH + "\\Projects\\" + proName + "\\" + branchVer + "." + extension
                with io.FileIO(proPath, "w") as f:
                    f.write(newBranchContent)
            else:
                branchVer = branchVer.split('.')[0] + "_" + str(
                    int(branchVer.split('.')[1]) + 1)
                proPath = self.PATH + "\\Projects\\" + proName + "\\" + branchVer + "." + extension
                with io.FileIO(proPath, "w") as f:
                    f.write(buffer)
        else:
            branchVer = branchVer.split('.')[0] + "_" + str(
                int(branchVer.split('.')[1]) + 1)
            proPath = self.PATH + "\\Projects\\" + proName + "\\" + branchVer + "." + extension
            with io.FileIO(proPath, "w") as f:
                f.write(buffer)
        time.sleep(0.1)
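utils.get_delta and utils.restore_delta are not shown here. A self-contained sketch of one way such text-delta helpers could work (an opcode-based encoding chosen for illustration, not the project's actual format):

import difflib
import json

def get_delta(old, new):
    """Encode new relative to old as a JSON list of copy/insert ops."""
    ops = []
    for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, old, new).get_opcodes():
        if tag == 'equal':
            ops.append(['=', i1, i2])      # copy old[i1:i2]
        else:
            ops.append(['+', new[j1:j2]])  # insert new text ('' for pure deletes)
    return json.dumps(ops)

def restore_delta(old, delta):
    """Rebuild the new version from old plus its delta."""
    return ''.join(old[op[1]:op[2]] if op[0] == '=' else op[1]
                   for op in json.loads(delta))

assert restore_delta('abcdef', get_delta('abcdef', 'abXdef')) == 'abXdef'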
Example #5
    def update_project(self):
        """Updating a non-branch version.

        This function gets a project name, version, content and file type from the client,
        then its getting the delta for the new version,
        saving the delta in the project folder and updating the Database (also with sharing).
        """
        isPng = False
        proName = self.clientsock.recv(self.BUFSIZ)
        self.clientsock.send("OK")
        size = self.clientsock.recv(self.BUFSIZ)
        self.clientsock.send("Size Gotten")
        self.c.execute("SELECT * FROM " + self.username + " WHERE name=:data",
                       {'data': proName})
        pro = str(self.c.fetchone())
        isSharing = pro.split(',')[2]
        isSharing = isSharing.replace(" u'", "").replace("'", "")
        sharing = pro.split(',')[3]
        sharing = sharing.replace(" u'", "").replace("')", "")
        pro = pro.split(',')[1]
        pro = pro[1:2]  # NOTE: takes a single character, so versions above 9 break here
        pro = str(int(pro) + 1)
        self.c.execute(
            "UPDATE " + self.username + " SET version=? WHERE name=?",
            [int(pro), proName])
        self.conn.commit()
        if isSharing != "NoOne":
            self.c.execute(
                "UPDATE " + isSharing + " SET version=? WHERE name=?",
                [int(pro), proName])
            self.conn.commit()
            self.c.execute(
                "SELECT sharing FROM " + isSharing + " WHERE name=:data",
                {'data': proName})
            users = str(self.c.fetchall())
            users = users.replace("[(u'", "").replace("',)]", "")
            if "^" in users:
                users = users.split("^")
                for user in users:
                    self.c.execute(
                        "UPDATE " + user + " SET version=? WHERE name=?",
                        [int(pro), proName])
                    self.conn.commit()
        if sharing != "NoOne":
            if "^" in sharing:
                users = sharing.split("^")
                for user in users:
                    self.c.execute(
                        "UPDATE " + user + " SET version=? WHERE name=?",
                        [int(pro), proName])
                    self.conn.commit()
        fileInfo = self.clientsock.recv(self.BUFSIZ)
        if fileInfo == "png":
            isPng = True
        if fileInfo == "txt":
            isTxt = True
        else:
            isTxt = False
        self.clientsock.send("Info Gotten")
        size = int(size)
        current_Size = 0
        buffer = b""
        while current_Size < size:
            data = self.clientsock.recv(1024)
            if not data:
                break
            if len(data) + current_Size > size:
                data = data[:size - current_Size]
            buffer += data
            current_Size += len(data)
        self.clientsock.send("File Gotten")
        proPath = self.PATH + "\\Projects\\" + proName + "\\"
        filesInDir = os.listdir(proPath)
        filesInDir.sort()
        for file in filesInDir:
            if file.split('.')[1] == "png":
                isPng = True
                break
        if not isPng:
            oldVerContent = ""
            for file in filesInDir:
                if file.split('.')[0] == "1":
                    with open(proPath + file, 'r') as verOneFile:
                        oldVerContent = verOneFile.read()
            for x in range(2, int(pro)):
                for file in filesInDir:
                    if file.split('.')[0] == str(x):
                        with open(proPath + file, 'r') as verXFile:
                            oldVerContent = utils.restore_delta(
                                oldVerContent, verXFile.read(), isTxt)
            delta = utils.get_delta(oldVerContent, str(buffer), isTxt)
            proPath = self.PATH + "\\Projects\\" + proName + "\\" + pro + "." + fileInfo
            with io.FileIO(proPath, "w") as f:
                f.write(delta)
        else:
            proPath = self.PATH + "\\Projects\\" + proName + "\\" + pro + "." + fileInfo
            with io.FileIO(proPath, "w") as f:
                f.write(buffer)
        time.sleep(0.1)
Example #6
import logging

import numpy as np
import pandas as pd
from scipy.stats import poisson

# stlr, R_EARTH and the helpers (get_companion, draw_planet, diluted_radius,
# get_a, get_pgeom, get_delta, get_duration, get_mes) come from the notebook
# this example was taken from.

def generate_planets(theta, stars=stlr, mes_threshold=10):
    """
    theta = (lnf0, alpha, beta, fB, gamma)
    """
    lnf0, alpha, beta, fB, gamma = theta
    
    planets = pd.DataFrame({'kepid':[], 'koi_prad':[], 'koi_period':[],
                           'koi_prad_true':[], 'koi_max_mult_ev':[]})

    n_skipped = 0
    
    for _, star in stars.iterrows():
        if np.isnan(star.radius) or np.isnan(star.mass):
            n_skipped += 1
            continue
            
        n_planets = poisson(np.exp(lnf0)).rvs()
        if n_planets == 0:
            continue
            
        try:
            star2, flux_ratio = get_companion(theta, star)
        except ValueError:
            # logging.warning('Skipping {}; cannot simulate binary.'.format(star.kepid))
            n_skipped += 1
            continue
        
        for i in range(n_planets):
            # First, figure out true & observed properties of planet
            radius, period = draw_planet(theta) 
            observed_radius, host_star = diluted_radius(radius, star, star2, flux_ratio)
            
            logging.debug('True: {:.2f}, Observed: {:.2f} ({})'.format(
                radius, observed_radius, flux_ratio))
            
            # Then, is it detected?
            # First, geometric:
            aor = get_a(period, host_star.mass)
            if np.isnan(aor):
                raise RuntimeError('aor is nan: P={} M={}'.format(period, host_star.mass))
            #print(host_star.mass, aor)
            transit_prob = get_pgeom(aor / host_star.radius, 0.) # no ecc.
            
            if np.random.random() > transit_prob:
                continue
            
            # Then depth and MES:
            depth = get_delta(observed_radius * R_EARTH / star.radius)
            tau = get_duration(period, aor, 0.) * 24 # no ecc.
            try:
                mes = get_mes(star, period, depth, tau)
            except ValueError:
                n_skipped += 1
                # raise RuntimeError('MES is nan! {}, {}, {}'.format(depth, tau))
                continue  # without this, mes would be stale or undefined below

            if mes < mes_threshold:
                continue
            
            # Add planet to catalog
            planets = planets.append({'kepid': star.kepid,
                                      'koi_prad': observed_radius,
                                      'koi_period': period,
                                      'koi_prad_true': radius,
                                      'koi_max_mult_ev': mes},
                                     ignore_index=True)
        
    print('{} planets generated ({} of {} stars skipped.)'.format(
        len(planets), n_skipped, len(stars)))
    return planets
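get_pgeom above is presumably the standard zero-eccentricity geometric transit probability; a minimal sketch of that formula (an assumption about the helper, not its actual source):

def pgeom_sketch(aor, ecc=0.0):
    """Geometric transit probability for scaled semi-major axis aor = a / R_star:
    roughly R_star / a, inflated for eccentric orbits."""
    return 1.0 / (aor * (1.0 - ecc ** 2))

print(pgeom_sketch(215.0))  # an Earth analog transits in ~0.5% of random orientations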
Example #7
    def create_model(self):
        #==================================================================================================#
        with tf.name_scope('inputs'):
            self.input_xs_1 = tf.placeholder(tf.float32, [None, self.ds_1],
                                             name='input_xs_1')
            self.input_xs_2 = tf.placeholder(tf.float32, [None, self.ds_2],
                                             name='input_xs_2')
            self.input_ys_1 = tf.placeholder(tf.int32,
                                             [None, self.class_number],
                                             name='input_ys_1')
            self.input_ys_2 = tf.placeholder(tf.int32,
                                             [None, self.class_number],
                                             name='input_ys_2')
            #----------------------------------------------------------------------------------#
            self.input_xl = tf.placeholder(tf.float32, [None, self.dt],
                                           name='input_xl')
            self.input_yl = tf.placeholder(tf.int32, [None, self.class_number],
                                           name='input_yl')
            self.input_xu = tf.placeholder(tf.float32, [None, self.dt],
                                           name='input_xu')
            self.input_yu = tf.placeholder(tf.int32, [None, self.class_number],
                                           name='input_yu')
            #----------------------------------------------------------------------------------#
            self.lr_1 = tf.placeholder(tf.float32, [], name='lr_1')
            self.lr_2 = tf.placeholder(tf.float32, [], name='lr_2')
            self.input_xt = tf.concat([self.input_xl, self.input_xu],
                                      0,
                                      name='input_xt')
            self.input_ya = tf.concat(
                [self.input_ys_1, self.input_ys_2, self.input_yl],
                0,
                name='input_ya')
        #==================================================================================================#
        # set the number of units in each layer of the source generator
        self.h_xs_1 = 512  # 2 layers
        self.h_xs_2 = 512
        #----------------------------#
        # set the number of units in each layer of the target generator
        self.h_xt = 512
        #----------------------------#
        # set the number of units in each layer of the domain discriminator
        self.h_d_1 = 128
        #------------------------------------------#
        # set the parameters of the generator
        self.w_g = {
            'w1_xs_1':
            tf.Variable(
                tf.truncated_normal([self.ds_1, self.h_xs_1], stddev=0.01)),
            'b1_xs_1':
            tf.Variable(tf.truncated_normal([self.h_xs_1], stddev=0.01)),
            'w2_xs_1':
            tf.Variable(tf.truncated_normal([self.h_xs_1, self.d],
                                            stddev=0.01)),
            'b2_xs_1':
            tf.Variable(tf.truncated_normal([self.d], stddev=0.01)),
            #-----------------------------------------------------------------#
            'w1_xs_2':
            tf.Variable(
                tf.truncated_normal([self.ds_2, self.h_xs_2], stddev=0.01)),
            'b1_xs_2':
            tf.Variable(tf.truncated_normal([self.h_xs_2], stddev=0.01)),
            'w2_xs_2':
            tf.Variable(tf.truncated_normal([self.h_xs_2, self.d],
                                            stddev=0.01)),
            'b2_xs_2':
            tf.Variable(tf.truncated_normal([self.d], stddev=0.01)),
            #-----------------------------------------------------------------#
            'w1_xt':
            tf.Variable(tf.truncated_normal([self.dt, self.h_xt],
                                            stddev=0.01)),
            'w2_xt':
            tf.Variable(tf.truncated_normal([self.h_xt, self.d], stddev=0.01)),
            'b1_xt':
            tf.Variable(tf.truncated_normal([self.h_xt], stddev=0.01)),
            'b2_xt':
            tf.Variable(tf.truncated_normal([self.d], stddev=0.01)),
        }
        #-----------------------------#
        # set the parameters of the classifier
        self.w_f = {
            'w_f':
            tf.Variable(
                tf.truncated_normal([self.d, self.class_number], stddev=0.01)),
            #--------------------------------------------------------------------#
            'b_f':
            tf.Variable(tf.truncated_normal([self.class_number], stddev=0.01)),
        }
        #-----------------------------#
        # set the parameters of the domain discriminator
        self.w_d = {
            'w1_d':
            tf.Variable(tf.truncated_normal([self.d, self.h_d_1],
                                            stddev=0.01)),
            'w2_d':
            tf.Variable(tf.truncated_normal([self.h_d_1, 2], stddev=0.01)),
            #--------------------------------------------------------------------#
            'b1_d':
            tf.Variable(tf.truncated_normal([self.h_d_1], stddev=0.01)),
            'b2_d':
            tf.Variable(tf.truncated_normal([2], stddev=0.01))
        }
        #==================================================================================================#
        # build projection network of source domains
        self.projection_xs_1 = utils.build_xs_1(self.input_xs_1, self.w_g,
                                                tf.nn.leaky_relu)
        self.projection_xs_2 = utils.build_xs_2(self.input_xs_2, self.w_g,
                                                tf.nn.leaky_relu)
        # build projection network of target domain
        self.projection_xt = utils.build_t(self.input_xt, self.w_g,
                                           tf.nn.leaky_relu)
        self.projection_xl = tf.slice(self.projection_xt, [0, 0],
                                      [self.nl, -1])
        self.projection_xu = tf.slice(self.projection_xt, [self.nl, 0],
                                      [self.nu, -1])
        # connecting all projection data
        self.all_data = tf.concat(
            [self.projection_xs_1, self.projection_xs_2, self.projection_xt],
            0)
        #==================================================================================================#
        # classification loss L_{G,f,alpha}
        self.f_xs_1_logits = utils.build_f(self.projection_xs_1, self.w_f)
        self.f_xs_2_logits = utils.build_f(self.projection_xs_2, self.w_f)
        self.f_xl_logits = utils.build_f(self.projection_xl, self.w_f)
        self.f_xu_logits = utils.build_f(self.projection_xu, self.w_f)
        self.f_xa_logits = tf.concat(
            [self.f_xs_1_logits, self.f_xs_2_logits, self.f_xl_logits], 0)
        #------------------------------------------------#
        # the accuracy of xs_1
        self.pred_xs_1 = tf.nn.softmax(self.f_xs_1_logits)
        self.correct_pred_xs_1 = tf.equal(tf.argmax(self.input_ys_1, 1),
                                          tf.argmax(self.pred_xs_1, 1))
        self.xs_1_acc = tf.reduce_mean(
            tf.cast(self.correct_pred_xs_1, tf.float32))
        # the accuracy of xs_2
        self.pred_xs_2 = tf.nn.softmax(self.f_xs_2_logits)
        self.correct_pred_xs_2 = tf.equal(tf.argmax(self.input_ys_2, 1),
                                          tf.argmax(self.pred_xs_2, 1))
        self.xs_2_acc = tf.reduce_mean(
            tf.cast(self.correct_pred_xs_2, tf.float32))
        # the accuracy of xl
        self.pred_xl = tf.nn.softmax(self.f_xl_logits)
        self.correct_pred_xl = tf.equal(tf.argmax(self.input_yl, 1),
                                        tf.argmax(self.pred_xl, 1))
        self.xl_acc = tf.reduce_mean(tf.cast(self.correct_pred_xl, tf.float32))
        # the accuracy of xu
        self.pred_yu = tf.nn.softmax(self.f_xu_logits)
        self.correct_pred_xu = tf.equal(tf.argmax(self.input_yu, 1),
                                        tf.argmax(self.pred_yu, 1))
        self.xu_acc = tf.reduce_mean(tf.cast(self.correct_pred_xu, tf.float32))
        #==================================================================================================#
        # adversarial loss L_{d,g,alpha}
        self.domain_xs_1_logits = utils.build_d(self.projection_xs_1, self.w_d,
                                                tf.nn.relu)
        self.domain_xs_2_logits = utils.build_d(self.projection_xs_2, self.w_d,
                                                tf.nn.relu)
        self.domain_xt_logits = utils.build_d(self.projection_xt, self.w_d,
                                              tf.nn.relu)
        self.domain_xa_logits = tf.concat([
            self.domain_xs_1_logits, self.domain_xs_2_logits,
            self.domain_xt_logits
        ], 0)
        # the softmax outputs of xs_1 and xs_2 from the domain discriminator
        self.d_pred_xs_1 = tf.nn.softmax(self.domain_xs_1_logits)
        self.d_pred_xs_2 = tf.nn.softmax(self.domain_xs_2_logits)
        # the softmax output of xt from the domain discriminator
        self.d_pred_xt = tf.nn.softmax(self.domain_xt_logits)
        #--------------------------#
        self.d_pred_xs_xt = tf.concat(
            [self.d_pred_xs_1, self.d_pred_xs_2, self.d_pred_xt], 0)
        self.d_pro_xs_xt = tf.reduce_mean(self.d_pred_xs_xt, 0)
        #------------------------------------------#
        self.xs_1_domain_label = tf.one_hot(tf.ones([self.ns_1], tf.int64), 2)
        self.xs_2_domain_label = tf.one_hot(tf.ones([self.ns_2], tf.int64), 2)
        self.xt_domain_label = tf.one_hot(tf.zeros([self.nt], tf.int64), 2)
        self.domain_ya = tf.concat([
            self.xs_1_domain_label, self.xs_2_domain_label,
            self.xt_domain_label
        ], 0)
        #-------------------------------------------#
        self.xs_1_domain_adv_label = tf.one_hot(
            tf.zeros([self.ns_1], tf.int64), 2)
        self.xs_2_domain_adv_label = tf.one_hot(
            tf.zeros([self.ns_2], tf.int64), 2)
        self.xt_domain_adv_label = tf.one_hot(tf.ones([self.nt], tf.int64), 2)
        self.domain_adv_ya = tf.concat([
            self.xs_1_domain_adv_label, self.xs_2_domain_adv_label,
            self.xt_domain_adv_label
        ], 0)
        #==================================================================================================#
        # compute the weights of the domains
        #==================================================================================================#
        # compute class centroid matrix
        self.class_mean_xs_1, self.class_mean_xs_2, self.class_mean_xt = utils.computer_class_mean(
            self.projection_xs_1, self.projection_xs_2, self.projection_xl,
            self.projection_xu, self.input_ys_1, self.input_ys_2,
            self.input_yl, self.pred_yu, self.class_number)
        #------------------------------------------------#
        # the weight of domains
        self.delta_1, self.delta_2, self.delta_xs_1, self.delta_xs_2 = utils.get_delta(
            self.class_mean_xs_1, self.class_mean_xs_2, self.class_mean_xt)

        #self.delta_1 = tf.constant(1, tf.float32)
        #self.delta_2 = tf.constant(1, tf.float32)
        #==================================================================================================#
        with tf.name_scope('loss_classifier'):
            #self.loss_classifier = loss.get_loss_classifier(self.f_xa_logits, self.input_ya, self.weight_instance)
            self.loss_classifier = loss.get_loss_classifier(
                self.f_xs_1_logits, self.f_xs_2_logits, self.f_xl_logits,
                self.input_ys_1, self.input_ys_2, self.input_yl, self.delta_1,
                self.delta_2)
        with tf.name_scope('loss_domain'):
            self.loss_domain = loss.get_loss_domain(
                self.domain_xs_1_logits, self.domain_xs_2_logits,
                self.domain_xt_logits, self.xs_1_domain_label,
                self.xs_2_domain_label, self.xt_domain_label, self.delta_1,
                self.delta_2)
            #self.loss_domain = loss.get_loss_domain_square(self.domain_xa_logits, self.domain_ya, self.weight_domain)
        with tf.name_scope('loss_domain_adv'):
            self.loss_domain_adv = loss.get_loss_domain(
                self.domain_xs_1_logits, self.domain_xs_2_logits,
                self.domain_xt_logits, self.xs_1_domain_adv_label,
                self.xs_2_domain_adv_label, self.xt_domain_adv_label,
                self.delta_1, self.delta_2)
            #self.loss_domain_adv = loss.get_loss_domain_square(self.domain_xa_logits, self.domain_adv_ya, self.weight_domain)
        with tf.name_scope('loss_reg_g'):
            self.loss_reg_g = loss.get_loss_reg_g(self.tau, self.w_g, self.w_f)
        with tf.name_scope('loss_diff'):
            self.loss_diff = tf.reduce_sum(tf.abs(self.w_g['w2_xs_1']-self.w_g['w2_xt'])) \
                            + tf.reduce_sum(tf.abs(self.w_g['b2_xs_1']-self.w_g['b2_xt'])) \
                            + tf.reduce_sum(tf.abs(self.w_g['w2_xs_2']-self.w_g['w2_xt'])) \
                            + tf.reduce_sum(tf.abs(self.w_g['b2_xs_2']-self.w_g['b2_xt']))
        #------------------------------------------#
        with tf.name_scope('loss_f'):
            self.loss_f = self.loss_classifier + self.loss_reg_g
        with tf.name_scope('loss_generator'):
            self.loss_generator = self.loss_f + self.loss_diff + self.beta * self.loss_domain_adv
        with tf.name_scope('loss_discriminator'):
            self.loss_discriminator = self.loss_domain
        #==================================================================================================#
        # train step
        self.generator_step = tf.train.AdamOptimizer(self.lr_1).minimize(
            self.loss_generator, var_list=[self.w_g, self.w_f])
        self.discriminator_step = tf.train.AdamOptimizer(self.lr_2).minimize(
            self.loss_discriminator, var_list=[self.w_d])
        #==================================================================================================#
        #writer = tf.summary.FileWriter("log/", self.sess.graph)
        # tf.initialize_all_variables was deprecated in favor of
        # tf.global_variables_initializer from TensorFlow 0.12.
        tf_major, tf_minor = (int(v) for v in tf.__version__.split('.')[:2])
        if tf_major < 1 and tf_minor < 12:
            init = tf.initialize_all_variables()
        else:
            init = tf.global_variables_initializer()
        self.sess.run(init)
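A hypothetical driver for the two optimizer steps defined above; the batch dictionary keys and learning rates are placeholders for illustration, not code from the project:

def train_step(model, batches, lr_g=1e-4, lr_d=1e-4):
    # Alternate adversarial updates: generator/classifier, then discriminator.
    feed = {
        model.input_xs_1: batches['xs_1'], model.input_ys_1: batches['ys_1'],
        model.input_xs_2: batches['xs_2'], model.input_ys_2: batches['ys_2'],
        model.input_xl: batches['xl'], model.input_yl: batches['yl'],
        model.input_xu: batches['xu'], model.input_yu: batches['yu'],
        model.lr_1: lr_g, model.lr_2: lr_d,
    }
    model.sess.run(model.generator_step, feed_dict=feed)
    model.sess.run(model.discriminator_step, feed_dict=feed)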
Example #8
    def estimate_change_points(self, D, k):
        if self.map_density is None:
            self.solution_path()
        delta = get_delta(D, k)
        return np.array([(np.array(delta.dot(betas)) > 0.04).sum()
                         for betas in self.map_betas])
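The thresholded count on the last line can be read in isolation; a toy sketch of the same operation (the difference operator and coefficients below are stand-ins, not the project's objects):

import numpy as np

delta = np.array([[1, -1, 0], [0, 1, -1]])  # stand-in difference operator
betas = np.array([0.5, 0.4, 0.4])           # stand-in coefficient vector
print((delta.dot(betas) > 0.04).sum())      # 1: one adjacent jump exceeds 0.04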
Example #9
    def backward(self, itr, output_data):
        print("--step#%d backward--" % itr)

        error = 0

        # t_a = self.layer_list[-1].neurons
        # t_d = output_data[itr]
        #
        # if self.loss_function == 'mse':
        #     error = utils.mse_loss(t_a, t_d)

        prev_delta = []
        temp_prev_delta = []
        for layer in reversed(self.layer_list):
            i_current_layer = self.layer_list.index(layer)  # index of current layer

            if i_current_layer == 0:
                # reached the first (input) layer; nothing left to update
                break

            prev_layer = self.layer_list[i_current_layer - 1]  # previous layer

            i_neuron = 0
            for neuron in layer.neurons:
                if i_current_layer == (len(self.layer_list) - 1):
                    # for output layer
                    layer.neurons = utils.convert_not_fired(layer.neurons, 50)
                    delta = utils.get_delta(i_neuron=i_neuron,
                                            l_connections=[layer.connections],
                                            t_d=output_data[i_neuron],
                                            t_a=neuron,
                                            t_i=prev_layer.neurons,
                                            tau=self.tau,
                                            d=self.delay,
                                            n_terminals=self.n_terminals,
                                            is_output_layer=True,
                                            prev_delta=None)

                    # if neuron < 0:
                    #     neuron = 40
                    y, w = utils.get_incoming_connections(
                        layer.connections, i_neuron)
                    y = utils.get_y(y, neuron, prev_layer.neurons, self.delay,
                                    self.tau, self.n_terminals)
                    delta_w = (self.lr * y * delta)
                    w = w + delta_w  # update weights
                    utils.update_connections(layer.connections, y, w, i_neuron)
                    temp_prev_delta.append(delta)
                    i_neuron = i_neuron + 1
                else:
                    # for hidden layers (generalized case)
                    # NOTE: this guard can never fire (list indices are always
                    # < len(self.layer_list)); the input-layer check above is
                    # what actually ends the backward pass.
                    if i_current_layer == len(self.layer_list):
                        break

                    next_layer = self.layer_list[i_current_layer + 1]  # layer J
                    delta = utils.get_delta(
                        i_neuron=i_neuron,
                        l_connections=[
                            next_layer.connections, layer.connections
                        ],
                        t_j=self.layer_list[i_current_layer + 1].neurons,
                        t_i=neuron,
                        t_h=self.layer_list[i_current_layer - 1].neurons,
                        tau=self.tau,
                        d=self.delay,
                        n_terminals=self.n_terminals,
                        is_output_layer=False,
                        prev_delta=prev_delta)

                    y, w = utils.get_incoming_connections(
                        layer.connections, i_neuron)
                    y = utils.get_y(y, neuron, prev_layer.neurons, self.delay,
                                    self.tau, self.n_terminals)
                    delta_w = -(self.lr * y * delta)
                    w = w + delta_w  # update weights
                    utils.update_connections(layer.connections, y, w, i_neuron)
                    temp_prev_delta.append(delta)
                    i_neuron = i_neuron + 1

            prev_delta = temp_prev_delta.copy()
            temp_prev_delta.clear()

        return None