def gibbs_sampling(Out, x):

    vision_set = []
    position_set = []
    name_set = []
    C_t_set = []
    r_t_set = []
    G_set = []
    pi_set = []
    Sigma_set = []
    mu_set = []
    data_num = []
    concept_num = Stick_large_L
    posdist_num = Stick_large_R
    MAP_X = [0 for e in range(data_set_num)]  #max x value of each map
    MAP_Y = [0 for e in range(data_set_num)]  #max y value of each map
    map_x = [0 for e in range(data_set_num)]  #min x value of each map
    map_y = [0 for e in range(data_set_num)]  #min y value of each map
    mu_0_set = []
    print("")

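    # Draw the global concept weights G_0 with a truncated stick-breaking
    # construction (GEM(gamma_0) truncated at Stick_large_L components).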
    G_0 = nonpara_tool.stick_breaking(gamma_0, concept_num)  #initialize G_0

    for e, dir in enumerate(Training_list):

        #=============== Get mu_0 ===============

        env_para = np.genfromtxt(dir + "/Environment_parameter.txt",
                                 dtype=None,
                                 delimiter=" ")  #read environment parameter
        print(env_para)

        MAP_X[e] = float(env_para[0][1])
        MAP_Y[e] = float(env_para[1][1])
        map_x[e] = float(env_para[2][1])
        map_y[e] = float(env_para[3][1])
        DATA_initial_index = int(env_para[5][1])
        DATA_last_index = int(env_para[6][1])
        DATA_NUM = DATA_last_index - DATA_initial_index + 1  #calculate data number

        map_center_x = ((MAP_X[e] - map_x[e]) / 2) + map_x[e]
        map_center_y = ((MAP_Y[e] - map_y[e]) / 2) + map_y[e]
        mu_0 = np.array([map_center_x, map_center_y, 0,
                         0])  #get mu_0 in every environment
        mu_0_set.append(mu_0)

        #=============== Read data ===============

        if e < len(Training_list) - 1:  #directory where training name data exists
            name_data_dir = "/name/per_100/"
            test_data_list = dir + "/test_num.txt"
            test_data_num = test_data_read(test_data_list, test_num)
            print("Training environment")

        else:  #directory where test name data exists
            name_data_dir = "/name/per_" + repr(x) + "/"
            test_data_list = dir + "/test_num.txt"
            test_data_num = test_data_read(test_data_list, test_num)
            print("Test environment")

        position_dir = dir + position_data_dir
        vision_dir = dir + vision_data_dir
        name_dir = dir + name_data_dir

        position = position_data_read(position_dir, DATA_NUM,
                                      test_data_num)  #read position data
        position_set.append(position)

        vision = vision_data_read(vision_dir, vision_increment, DATA_NUM,
                                  test_data_num)  #read vision data
        vision_set.append(vision)

        name = Name_data_read(name_dir, name_increment, DATA_NUM,
                              test_data_num)  #read name data
        name_set.append(name)

        #=============== Print information about the read data ===============

        print("Read environment directory: " + dir)
        print("Data number: " + repr(DATA_NUM))
        print("Test data number list")
        print(test_data_num)
        print("Name vector sum")
        print(sum(name / name_increment))
        print("")

        #if this is a test environment, exclude the held-out test data
        if len(test_data_num) > 1:
            DATA_NUM = DATA_NUM - len(test_data_num)

        Learning_data_num = (DATA_last_index - DATA_initial_index +
                             1) / Slide  #learning data number
        data_num.append(DATA_NUM)

        #=============== Initialize local parameters ===============

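        # Draw an environment-specific concept mixture G centered on the
        # shared global weights G_0.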
        G = np.random.dirichlet(G_0 + gamma)  #initialize G
        G_set.append(G)

        pi_e = []
        for i in range(concept_num):
            pi = nonpara_tool.stick_breaking(beta,
                                             Stick_large_R)  #initialize pi
            pi_e.append(pi)
        pi_set.append(pi_e)

        c_t = [1000 for n in xrange(DATA_NUM)]  #initialize c_t
        C_t_set.append(c_t)

        r_t = [1000 for n in xrange(DATA_NUM)]  #initialize r_t
        r_t_set.append(r_t)

        mu = init_mu(posdist_num, position, DATA_NUM)  #initialize mu
        mu_set.append(mu)

        Sigma = np.array([[[sigma_init, 0.0, 0.0, 0.0],
                           [0.0, sigma_init, 0.0, 0.0],
                           [0.0, 0.0, sigma_init, 0.0],
                           [0.0, 0.0, 0.0, sigma_init]]
                          for i in range(posdist_num)])  #initialize Sigma
        Sigma_set.append(Sigma)

    VISION_DIM = len(vision_set[0][0])
    NAME_DIM = len(name_set[0][0])

    phi_v = np.array([[float(1.0) / VISION_DIM for i in range(VISION_DIM)]
                      for j in range(concept_num)])  #initialize phi^v
    phi_n = np.array([[float(1.0) / NAME_DIM for i in range(NAME_DIM)]
                      for j in range(concept_num)])  #initialize phi^n

    region_choice = [dr for dr in range(posdist_num)]  #[0,1,2,...,posdist - 1]
    class_choice = [dc for dc in range(concept_num)]  #[0,1,2,...,concept - 1]

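    # Gibbs sweep: for each environment, resample r_t, then C_t, then the
    # Gaussian parameters (mu, Sigma), then pi and G; finally resample the
    # shared phi^v, phi^n and G_0 across all environments.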
    for iter in xrange(iteration):

        print("iteration " + repr(iter + 1))
        posdist_count = [
            [0.0 for i in range(posdist_num)] for j in range(data_set_num)
        ]  #store the number of position distributions in all environments
        concept_count_set = []  #store the number of concepts in all environments

        for e in xrange(data_set_num):

            #=============== Sampling r_t ===============
            concept_posdist_count = [
                [0.0 for i in range(posdist_num)] for j in range(concept_num)
            ]  #store the number of position distributions in concepts
            concept_count = [0.0 for i in range(concept_num)
                             ]  #store the number of concepts in environment e
            gauss_prob_set = np.zeros(
                (posdist_num, data_num[e]),
                dtype=float)  #store log p(x_t|mu_{r_t},Sigma_{r_t}) + log p(r_t|pi_{C_t})

            if iter == 0:
                C_t = np.random.randint(concept_num,
                                        size=data_num[e])  #initialize C_t
            else:
                C_t = C_t_set[e]

            pi_e = np.array(pi_set[e])
            pi_data = np.array([pi_e[C_t[d]] for d in range(data_num[e])])
            pi_data = np.log(pi_data)

            for i in range(posdist_num):
                gauss_prob = ss.multivariate_normal.logpdf(
                    position_set[e], mu_set[e][i], Sigma_set[e][i]
                ) + pi_data[:, i]  #log p(x_t|mu_i,Sigma_i) + log p(r_t=i|pi_{C_t})
                gauss_prob_set[i] += gauss_prob

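            # Normalize in log space: transpose to (data, posdist), subtract each
            # row's max before exponentiating so exp() cannot underflow, then
            # rescale the rows into categorical probabilities over r_t.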
            gauss_prob_set = gauss_prob_set.T
            max_posdist = np.max(gauss_prob_set, axis=1)
            gauss_prob_set = gauss_prob_set - max_posdist[:, None]
            gauss_prob_set = np.exp(gauss_prob_set)
            sum_set = np.sum(gauss_prob_set, axis=1)
            gauss_prob_set = gauss_prob_set / sum_set[:, None]

            for d in xrange(0, data_num[e], Slide):
                r_t_set[e][d] = np.random.choice(
                    region_choice, p=gauss_prob_set[d])  #sampling r_t
                posdist_count[e][r_t_set[e][d]] += 1.0

            r_t = r_t_set[e]

            #=============== Sampling C_t ===============
            multi_prob_set = np.zeros(
                (concept_num, data_num[e]),
                dtype=float)  #store log p(v_t|phi^v_{C_t}) + log p(n_t|phi^n_{C_t}) + log p(C_t|G)

            phi_v_log = np.log(phi_v)
            phi_n_log = np.log(phi_n)
            pi_data = np.array([pi_e.T[r_t[d]] for d in range(data_num[e])])
            pi_data = np.log(pi_data)
            G_log = np.log(G_set[e])

            for i in range(concept_num):
                vision_prob = vision_set[e].dot(phi_v_log[i])
                name_prob = name_set[e].dot(phi_n_log[i])
                modal_prob = vision_prob + name_prob + pi_data[:, i]
                modal_prob = modal_prob + G_log[i]
                multi_prob_set[i] += modal_prob

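            # Same max-subtraction normalization as above, now over concept indices.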
            multi_prob_set = multi_prob_set.T
            max_concept = np.max(multi_prob_set, axis=1)
            multi_prob_set = multi_prob_set - max_concept[:, None]
            multi_prob_set = np.exp(multi_prob_set)
            sum_concept_set = np.sum(multi_prob_set, axis=1)
            multi_prob_set = multi_prob_set / sum_concept_set[:, None]

            for d in xrange(0, data_num[e], Slide):
                C_t_set[e][d] = np.random.choice(
                    class_choice, p=multi_prob_set[d])  #sampling C_t
                concept_count[C_t_set[e][d]] += 1.0
                concept_posdist_count[C_t_set[e][d]][r_t[d]] += 1.0

            #=============== Sampling mu and Sigma ==========
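            # Conjugate Normal-inverse-Wishart posterior per position distribution r,
            # with n = len(pos_r) data points assigned to r:
            #   Kappa = kappa_0 + n,  Nu = nu_0 + n
            #   Mu  = (kappa_0 * mu_0 + n * bar_pose) / (kappa_0 + n)
            #   Psi = psi_0 + Matrix_R + (kappa_0*n/(kappa_0+n)) * (bar_pose-mu_0)(bar_pose-mu_0)^T
            # then Sigma ~ InvWishart(Nu, Psi) and mu ~ N(Mu, Sigma / Kappa).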
            for r in xrange(posdist_num):
                pos_r = []  #store position data in position distribution r

                #===== calculate average =====
                for d in xrange(data_num[e]):
                    if r_t_set[e][d] == r:
                        pos_r.append(position_set[e][d])

                if len(pos_r) > 0:
                    bar_pose = np.mean(pos_r, axis=0)  #sample mean of assigned data
                else:
                    bar_pose = np.zeros(4)

                #===== calculate Mu =====
                Mu = (kappa_0 * mu_0_set[e] + len(pos_r) * bar_pose) / (
                    kappa_0 + len(pos_r))  #Mu updated

                #===== calculate Matrix_R =====
                bar_pose_matrix = np.matrix(bar_pose)
                Matrix_R = np.zeros([4, 4])

                for i in xrange(len(pos_r)):
                    pos_r_matrix = np.matrix(pos_r[i])
                    Matrix_R += ((pos_r_matrix - bar_pose_matrix).T *
                                 (pos_r_matrix - bar_pose_matrix))

                #===== calculate Psi =====
                ans = ((bar_pose_matrix - mu_0_set[e]).T *
                       (bar_pose_matrix - mu_0_set[e])) * (
                           (kappa_0 * len(pos_r)) / (kappa_0 + len(pos_r)))
                Psi = psi_0 + Matrix_R + ans  #Psi updated

                #===== update hyper parameter (Kappa,Nu) =====
                Kappa = kappa_0 + len(pos_r)  #Kappa updated
                Nu = nu_0 + len(pos_r)  #Nu updated

                #===== sample Sigma from the inverse-Wishart, then mu from the Gaussian =====
                Sigma_set[e][r] = Prob_Cal.sampling_invwishartrand(
                    Nu, Psi)  #sampling Sigma
                Sigma_temp = Sigma_set[e][r] / Kappa
                mu_set[e][r] = np.random.multivariate_normal(
                    Mu, Sigma_temp)  #sampling Mu

                if len(pos_r) == 0:  #if no data is assigned, re-initialize randomly
                    p = np.array([
                        random.uniform(map_x[e], MAP_X[e]),
                        random.uniform(map_y[e], MAP_Y[e]),
                        random.uniform(-1.0, 1.0),
                        random.uniform(-1.0, 1.0)
                    ])
                    mu_set[e][r] = p
                    Sigma_set[e][r] = np.array([[sigma_init, 0.0, 0.0, 0.0],
                                                [0.0, sigma_init, 0.0, 0.0],
                                                [0.0, 0.0, sigma_init, 0.0],
                                                [0.0, 0.0, 0.0, sigma_init]])

            #=============== Sampling pi and G ===============
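            # Conjugate Dirichlet updates: pi_c from per-concept position-distribution
            # counts plus beta, G from this environment's concept counts plus gamma * G_0.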
            for c in range(concept_num):
                pi_set[e][c] = np.random.dirichlet(concept_posdist_count[c] +
                                                   beta)  #sampling pi

            G_set[e] = np.random.dirichlet(concept_count + (gamma * G_0) +
                                           50) + 1e-100  #sampling G
            concept_count_set.append(concept_count)

        #=============== Sampling phi^v and phi^n and G_0 ===============
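        # Pool assignments across all environments: for each concept c, collect the
        # vision and name observations with C_t = c, then resample phi^v_c and
        # phi^n_c from Dirichlet posteriors over their bag-of-feature counts.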
        total_vision_set = []  #store aggregated vision (visual feature) counts
        total_name_set = []  #store aggregated name counts
        concept_count = [0.0 for i in range(concept_num)
                         ]  #concept counts pooled over all environments

        for c in xrange(concept_num):

            vision_c = []
            name_c = []

            for e in xrange(data_set_num):
                for d in xrange(data_num[e]):
                    if C_t_set[e][d] == c:
                        vision_c.append(vision_set[e][d])
                        name_c.append(name_set[e][d])
                        concept_count[c] += 1.0

            total_vision = BoF.bag_of_feature(vision_c, VISION_DIM)
            total_vision_set.append(total_vision)
            total_vision = total_vision + alpha_v
            phi_v[c] = np.random.dirichlet(
                total_vision) + 1e-100  #sampling phi_v

            total_name = BoF.bag_of_feature(name_c, NAME_DIM)
            total_name_set.append(total_name)
            total_name = total_name + alpha_n
            phi_n[c] = np.random.dirichlet(
                total_name) + 1e-100  #sampling phi_n

            concept_count[c] += gamma_0

            if len(vision_c) == 0:
                phi_v[c] = np.array(
                    [float(1.0) / VISION_DIM for i in range(VISION_DIM)])
                phi_n[c] = np.array(
                    [float(1.0) / NAME_DIM for i in range(NAME_DIM)])

        G_0 = np.random.dirichlet(concept_count)  #sampling G_0

        if ((iter + 1) % iteration) == 0:

            print("")
            exist_concept_num = 0
            for i in range(concept_num):
                if concept_count[i] > gamma_0:
                    exist_concept_num += 1

            print("Concept number existing:" + repr(exist_concept_num))

            exist_posdist_num = [0 for e in range(data_set_num)]
            for e in range(data_set_num):
                for r in range(posdist_num):
                    if posdist_count[e][r] > 0:
                        exist_posdist_num[e] += 1

            print("Position distribution number existing:" +
                  repr(exist_posdist_num) + "\n")

            print(posdist_count)

            #========== Saving ==========
            print("--------------------------------------------")

            Out_put_dir = "gibbs_result/similar_result/env_num_" + Out + "/per_" + repr(
                x) + "_iter_" + repr(iter + 1)
            print(Out_put_dir)

            try:
                os.mkdir(Out_put_dir)
            except OSError:
                shutil.rmtree(Out_put_dir)
                os.mkdir(Out_put_dir)

            #saving finish time
            finish_time = time.time() - start_time
            f = open(Out_put_dir + "/time.txt", "w")
            f.write("time:" + repr(finish_time) + " seconds.")
            f.close()

            f = open(Out_put_dir + "/training_dataset", 'w')
            for i, d in enumerate(Training_list):
                w = repr(i) + ":  " + d + "\n"
                f.write(w)
            f.close()

            #=============== saving environment parameter ===============
            for i in range(data_set_num):
                os.mkdir(Out_put_dir + "/dataset" + repr(i))
                os.mkdir(Out_put_dir + "/dataset" + repr(i) + "/mu")
                os.mkdir(Out_put_dir + "/dataset" + repr(i) + "/sigma")
                np.savetxt(
                    Out_put_dir + "/dataset" + repr(i) + "/data_concept.txt",
                    C_t_set[i])
                np.savetxt(
                    Out_put_dir + "/dataset" + repr(i) + "/data_posdist.txt",
                    r_t_set[i])
                np.savetxt(
                    Out_put_dir + "/dataset" + repr(i) + "/posdist_count.txt",
                    posdist_count[i])
                np.savetxt(Out_put_dir + "/dataset" + repr(i) + "/pi.csv",
                           pi_set[i])
                np.savetxt(Out_put_dir + "/dataset" + repr(i) + "/G.txt",
                           G_set[i])
                np.savetxt(
                    Out_put_dir + "/dataset" + repr(i) + "/concept_count.txt",
                    concept_count_set[i])
                f = open(Out_put_dir + "/dataset" + repr(i) + "/Parameter.txt",
                         "w")
                f.write("max_x_value_of_map: " + repr(MAP_X[i]) +
                        "\nMax_y_value_of_map: " + repr(MAP_Y[i]) +
                        "\nMin_x_value_of_map: " + repr(map_x[i]) +
                        "\nMin_y_value_of_map: " + repr(map_y[i]) +
                        "\nConcept_num: " + repr(exist_concept_num) +
                        "\nPosition_distribution_num: " +
                        repr(exist_posdist_num[i]) + "\nData_num: " +
                        repr(data_num[i]) + "\nDataset: " + Training_list[i] +
                        "\nSliding_data_parameter: " + repr(Slide) +
                        "\nName_dim: " + repr(NAME_DIM) + "\nVision_dim: " +
                        repr(VISION_DIM) + "\nVision_diric: " +
                        repr(args.vision) + "\nStick_breaking_process_max: " +
                        repr(Stick_large_L) + "\nname_not: " +
                        repr(args.name_not) + "\ntest_num: " + repr(test_num))
                f.close()

                f = open(
                    Out_put_dir + "/dataset" + repr(i) +
                    "/data_concept_posdist.txt", "w")
                for d in range(data_num[i]):
                    f.write("data:" + repr(d) + " C_t:" +
                            repr(C_t_set[i][d]) + " r_t:" +
                            repr(r_t_set[i][d]) + "\n")
                f.close()

                #=============== Saving Gaussian distribution ===============
                for j in xrange(posdist_num):
                    np.savetxt(
                        Out_put_dir + "/dataset" + repr(i) + "/mu/gauss_mu" +
                        repr(j) + ".csv", mu_set[i][j])
                    np.savetxt(
                        Out_put_dir + "/dataset" + repr(i) +
                        "/sigma/gauss_sigma" + repr(j) + ".csv",
                        Sigma_set[i][j])

            #=============== saving concept parameter ===============
            np.savetxt(Out_put_dir + "/phi_v.csv", phi_v)
            np.savetxt(Out_put_dir + "/phi_n.csv", phi_n)
            np.savetxt(Out_put_dir + "/bag_of_vision.txt", total_vision_set)
            np.savetxt(Out_put_dir + "/bag_of_name.txt", total_name_set)

            f = open(Out_put_dir + "/hyper_parameter.txt", "w")
            f.write("alpha_v: " + repr(alpha_v) + ("\nalpha_n: ") +
                    repr(alpha_n) + ("\ngamma_0: ") + repr(gamma_0) +
                    ("\nkappa_0: ") + repr(kappa_0) + ("\nnu_0: ") +
                    repr(nu_0) + "\ngamma: " + repr(gamma) + "\nbeta: " +
                    repr(beta) + "\ninitial sigma: " + repr(sigma_init) +
                    "\nstick break limit: " + repr(Stick_large_L) +
                    "\nvision_increment: " + repr(vision_increment) +
                    "\nname_increment: " + repr(name_increment) + "\npsi: [" +
                    repr(psi_0[0][0]) + "\n" + repr(psi_0[1][1]) + "\n" +
                    repr(psi_0[2][2]) + "\n" + repr(psi_0[3][3]) + "]")
            f.close()

    print(name_data_dir)
Example #2
def gibbs(data_pose, data_feature, data_word, word_data_ind):

    if args.Nonpara:

        pi = nonpara_tool.stick_breaking(gamma, Stick_large_L)
        clas_num = len(pi)
        print "Stick breakin process doneļ¼Ž"
    else:
        global clas_num
    print clas_num
    #Initializing the mean of Gaussian distribution

    Myu_Ct = np.array([[
        random.uniform(map_x, MAP_X),
        random.uniform(map_y, MAP_Y),
        random.uniform(-1, 1),
        random.uniform(-1, 1)
    ] for i in xrange(clas_num)])
    #========== initialize Gaussian parameter: mu ==========
    for j in range(clas_num):
        index = random.randrange(len(data_pose))  #pick a random training pose
        Myu_Ct[j] = data_pose[index]
    initial_mu = np.array(Myu_Ct)  #keep a copy of the initial means
    #Initializing covariance matrix of positional Gaussian distribution
    Sigma_Ct = [
        np.matrix([[sigma_init, 0.0, 0.0, 0.0], [0.0, sigma_init, 0.0, 0.0],
                   [0.0, 0.0, sigma_init, 0.0], [0.0, 0.0, 0.0, sigma_init]])
        for i in range(clas_num)
    ]

    #Initializing the mean of Multinomial distribution for Image features
    fi_Ct = np.array([[float(1.0) / FEATURE_DIM for i in range(FEATURE_DIM)]
                      for j in range(clas_num)])
    if args.Word:
        #Initializing the mean of Multinomial distribution for Words
        ramda_Ct = np.array(
            [[float(1.0) / word_class for i in range(word_class)]
             for j in range(clas_num)])

    C_t = np.arange(clas_num)

    #Initializing class index of data.
    data_c = np.array([1000 for n in xrange(DATA_NUM)])

    for iter in xrange(iteration):
        print 'Iteration ' + repr(iter + 1) + '\n'

        #<<<<<Sampling class index C_t<<<<

        print 'Sampling class index...\n'

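        # Per-datum class posterior in log space: Gaussian position likelihood
        # + log mixing weight pi + image-feature multinomial likelihood
        # (+ word multinomial when args.Word), normalized by subtracting the
        # max before exponentiating.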
        for d in xrange(0, DATA_NUM, Slide):

            prob_C_t = np.array([0.0 for i in xrange(clas_num)])

            for i in range(clas_num):
                prob_C_t[i] += Prob_Cal.multi_gaussian_log(
                    data_pose[d], Myu_Ct[i], Sigma_Ct[i])
                prob_C_t[i] += math.log(pi[i])

                prob_C_t[i] += Prob_Cal.multi_nomial_log(
                    data_feature[d], fi_Ct[i])

                if args.Word:
                    if sum(data_word[d]) != 0:
                        data_word[d] = np.array(data_word[d])
                        prob_C_t[i] += Prob_Cal.multi_nomial_log(
                            data_word[d], ramda_Ct[i])

            max_class = np.argmax(prob_C_t)
            prob_C_t -= prob_C_t[max_class]
            prob_C_t = np.exp(prob_C_t)
            prob_C_t = Prob_Cal.normalize(prob_C_t)  #Normalize weight.

            data_c[d] = np.random.choice(C_t, p=prob_C_t)
            print 'Iteration:', iter + 1, 'Data:', d + DATA_initial_index, \
                'max_prob', max_class, ':', prob_C_t[max_class], \
                'Class index:', data_c[d]

        #<<<<< Sampling Gaussian parameters Myu_Ct, Sigma_Ct >>>>>

        print 'Started sampling parameters of Position Gaussian dist...\n'

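        # Same conjugate Normal-inverse-Wishart update as in gibbs_sampling
        # above, now per class c.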
        for c in xrange(clas_num):
            pose_c = []
            #========Calculating average====
            for d in xrange(len(data_c)):
                if data_c[d] == c:
                    pose_c.append(data_pose[d])

            if len(pose_c) > 0:
                bar_pose = np.mean(pose_c, axis=0)  #sample mean of assigned poses
            else:
                bar_pose = np.array([0.0, 0.0, 0.0, 0.0])

            #=========Calculating Mu=============
            Mu = (kappa_0 * mu_0 + len(pose_c) * bar_pose) / (kappa_0 +
                                                              len(pose_c))

            #=========Calculating Matrix_C===================
            bar_pose_matrix = np.matrix(bar_pose)

            Matrix_C = np.zeros([4, 4])
            for i in xrange(len(pose_c)):
                pose_c_matrix = np.matrix(pose_c[i])
                Matrix_C += ((pose_c_matrix - bar_pose_matrix).T *
                             (pose_c_matrix - bar_pose_matrix))

            #=======Calculating Psai===============
            ans = ((bar_pose_matrix - mu_0).T *
                   (bar_pose_matrix - mu_0)) * ((kappa_0 * len(pose_c)) /
                                                (kappa_0 + len(pose_c)))
            Psai = psai_0 + Matrix_C + ans

            #=======Updating hyper parameter:Kappa,Nu===============================
            Kappa = kappa_0 + len(pose_c)
            Nu = nu_0 + len(pose_c)

            #============Sampling from inverse-Wishart dist====================

            Sigma_Ct[c] = Prob_Cal.sampling_invwishartrand(Nu, Psai)
            Sigma_temp = Sigma_Ct[c] / Kappa

            Myu_Ct[c] = np.random.multivariate_normal(Mu, Sigma_temp)

            #No assigned data: re-initialize from a random pose
            if len(pose_c) == 0:
                index = random.randrange(len(data_pose))
                Myu_Ct[c] = data_pose[index]
                #Myu_Ct[c]=[random.uniform(map_x,MAP_X ),random.uniform(map_y,MAP_Y),random.uniform(-1,1),random.uniform(-1,1)]
                Sigma_Ct[c] = np.matrix([[sigma_init, 0.0, 0.0, 0.0],
                                         [0.0, sigma_init, 0.0, 0.0],
                                         [0.0, 0.0, sigma_init, 0.0],
                                         [0.0, 0.0, 0.0, sigma_init]])

        print 'Finished sampling parameters of Position Gaussian dist...\n'

        #<<<<<<Sampling Parameter of Multinomial fi_Ct>>>>>>>>>>>>>>>>>>

        print 'Started sampling parameters of Image features Multinomial dist...\n'

        for c in xrange(clas_num):
            feat_c = []
            for d in xrange(len(data_c)):
                if data_c[d] == c:
                    feat_c.append(data_feature[d])

            total_feat = BoF.bag_of_feature(feat_c, FEATURE_DIM)
            total_feat = total_feat + alfa
            fi_Ct[c] = np.random.dirichlet(total_feat)
            if len(feat_c) == 0:
                fi_Ct[c] = [
                    float(1.0) / FEATURE_DIM for i in range(FEATURE_DIM)
                ]

        print 'Finished sampling parameters of Image features Multinomial dist...\n'

        #If you estimate space names, set Word to True
        #<<<<Sampling word dist parametrer:ramda_ct>>>>>>>>>>>>>>>>>>>>>
        if args.Word:
            print 'Started sampling parameters of word Multinomial dist...\n'

            word_distribution = []
            for c in xrange(clas_num):

                word_c = []
                for d in xrange(len(data_c)):
                    if data_c[d] == c:
                        word_c.append(data_word[d])

                total_word = BoF.bag_of_words(word_c, word_class)

                word_distribution.append(total_word)

                total_word = total_word + beta
                ramda_Ct[c] = np.random.dirichlet(total_word)

                #No data in class
                if len(word_c) == 0:
                    ramda_Ct[c] = [
                        float(1.0) / word_class for i in range(word_class)
                    ]

            print 'Finished sampling parameters of Word Multinomial dist...\n'

        # If you use the nonparametric Bayes model, set Nonpara to True.
        if args.Nonpara:
            print 'Started sampling parameters of index Multinomial dist...\n'
            #<<<<<Sampling parameter(pi) of class multinomial dist>>>>>>>>>>>

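            # Dirichlet posterior over class weights: per-class data counts
            # smoothed by the concentration parameter gamma.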
            class_count = [0 for i in range(clas_num)]

            for c in xrange(clas_num):
                for d in xrange(len(data_c)):
                    if data_c[d] == c:
                        class_count[c] += 1.0
                class_count[c] += gamma

            pi = np.random.dirichlet(class_count)
            print 'Finished sampling parameters of index Multinomial dist...\n'

        print 'Iteration ', iter + 1, ' Done..\n'
    C_num = clas_num
    if args.Nonpara:
        C_num = 0
        for i in range(len(class_count)):
            if class_count[i] > gamma:
                C_num += 1

        print "Class num:" + repr(C_num) + "\n"

    #=====Saving===========================================

    os.mkdir(Out_put_dir)
    os.mkdir(Out_put_dir + "/mu")
    os.mkdir(Out_put_dir + "/sigma")
    os.mkdir(Out_put_dir + "/image_multi")
    os.mkdir(Out_put_dir + "/class")
    os.mkdir(Out_put_dir + "/word")
    for i in xrange(clas_num):
        #Writing parameter of positional Gaussian dist
        np.savetxt(Out_put_dir + "/mu/gauss_mu" + repr(i) + ".csv", Myu_Ct[i])
        np.savetxt(Out_put_dir + "/sigma/gauss_sgima" + repr(i) + ".csv",
                   Sigma_Ct[i])
        #Writing parameter of image features multinomial dist
        np.savetxt(Out_put_dir + "/image_multi/fi_" + repr(i) + ".csv",
                   fi_Ct[i])
        #Writing class indexes
        assigned = []
        for r in xrange(len(data_c)):
            if i == data_c[r]:
                assigned.append(r + DATA_initial_index)
        np.savetxt(Out_put_dir + "/class/class" + repr(i) + ".txt",
                   assigned,
                   fmt="%d")
        if args.Word:
            #Writing parameters of word multinomial dist
            f = open(
                Out_put_dir + "/word/word_distribution" + repr(i) + '.txt',
                'w')
            for w in xrange(word_class):
                f.write(repr(ramda_Ct[i][w]) + "\n")
            f.close()
    #Writing all index
    np.savetxt(Out_put_dir + "/all_class.csv", data_c, fmt="%d")

    #saving finish time
    finish_time = time.time() - start_time

    f = open(Out_put_dir + "/time.txt", "w")
    f.write("time:" + repr(finish_time) + " seconds.")
    f.close()

    #====Writing Parameter===========

    f = open(Out_put_dir + "/Parameter.txt", "w")

    f.write("max_x_value_of_map: " + repr(MAP_X) + "\nMax_y_value_of_map: " +
            repr(MAP_Y) + "\nMin_x_value_of_map: " + repr(map_x) +
            "\nMin_y_value_of_map: " + repr(map_y) + "\nNumber_of_place: " +
            repr(clas_num) + "\nData_num: " + repr(Learnig_data_num) +
            "\nSliding_data_parameter: " + repr(Slide) + "\nWord_class: " +
            repr(word_class) + "\nDataset: " + data_diric +
            "\nEstimated_place_num: " + repr(C_num) +
            "\nNonparametric_Bayse: " + repr(args.Nonpara) +
            "\nImage_feature_dim: " + repr(FEATURE_DIM) +
            "\nUsing_word_data: " + repr(args.Word) +
            "\nStick_breaking_process_max: " + repr(Stick_large_L))
    f.close()

    f = open(Out_put_dir + "/hyper parameter.txt", "w")
    f.write("alfa: " + repr(alfa) + "\n beta: " + repr(beta) +
            ("\nkappa_0: ") + repr(kappa_0) + ("\nnu_0: ") + repr(nu_0) +
            "\nmu_0: " + repr(mu_0) + "\npsai_0: " + repr(psai_0) +
            "\ngamma: " + repr(gamma) + "\ninitial sigma: " +
            repr(sigma_init) + "\nsitck break limit: " + repr(Stick_large_L))
    if args.Word:
        f.write("\nspace_name:")
        for i in range(len(space_name)):
            f.write(space_name[i] + ",")

    f.write("\nIteration:" + repr(iteration))
    f.close()
    print "%1.3f second" % (finish_time)
    if args.Nonpara:
        np.savetxt(Out_put_dir + "/pi.csv", pi)
    np.savetxt(Out_put_dir + "/initial_mu.csv", initial_mu)
    np.savetxt(Out_put_dir + "/last_mu.csv", Myu_Ct)
    if args.Word:
        f = open(Out_put_dir + "/word_data_class.txt", "w")
        f.write("data space_name class\n")
        for j in word_data_ind:
            vec = data_word[j]
            for i in range(len(space_name)):
                if vec[i] != 0:
                    f.write(
                        repr(j + DATA_initial_index) + " " + space_name[i] +
                        " " + repr(data_c[j]) + "\n")
        f.close()