Example 1
import time

# ld and nor below are assumed project modules providing LoadData() and Normalization().


def run():
    t0 = float(time.clock())

    # load data from file, and do normalization on X.
    [trainX, trainY, testX, testY] = ld.LoadData()
    t1 = float(time.clock())
    print 'Loading data from file took %.4f s\n' % (t1 - t0)

    [trainX, testX] = nor.Normalization(trainX, testX)
    t2 = float(time.clock())
    print 'Normalizing train & test X took %.4f s\n' % (t2 - t1)

    # implementation assignments
    lr_reg = [0.001, 0.01, 0.1, 1, 10, 100]  # candidate learning rates
    max_iter = 1000000  # max iterations
    eps = 0.001  # gradient convergence epsilon
    lmd_reg = [0, 0.0001, 0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10,
               100]  # candidate regularization lambdas

    # part 1: lambda = 0, varying learning rate
    best_lr = run_part1(trainX,
                        trainY)  # default lr, grad_epsilon and max_iterations
    # [lr,bestloss,weight,lossCont] = HW1_part_1(trainX,trainY) #default lr,grad_epsilon and max_iterations
    t3 = float(time.clock())
    print 'Part 1 (lambda = 0, varying lr) took %.4f s\n' % (t3 - t2)

    # part 2: fixed learning rate, varying lambda
    max_iter = 10000
    run_part2(trainX, trainY, testX, testY, lmd_reg, best_lr, eps, max_iter)
    t4 = float(time.clock())
    print 'Part 2 (lr = best_lr, varying lambda) took %.4f s\n' % (t4 - t3)

    # part 3: fixed lr, 10-fold cross-validation to pick lambda
    # split training data into k parts
    max_iter = 1000
    k = 10
    run_part3(trainX, trainY, testX, testY, best_lr, eps, max_iter, lmd_reg, k)
    t5 = float(time.clock())
    print 'Part 3 (lr = best_lr, 10-fold CV to find the best lambda) took %.4f s\n' % (
        t5 - t4)
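The ld.LoadData and nor.Normalization helpers above are external project modules that are not shown. As one hedged reading of the call, Normalization is assumed to do z-score scaling with statistics taken from the training set only; a minimal sketch under that assumption (NumPy arrays assumed) could look like this:

import numpy as np

def Normalization(trainX, testX):
    # Assumed behavior: standardize every feature with the training-set mean/std,
    # then apply the same transform to the test set (never fit on test data).
    mean = trainX.mean(axis=0)
    std = trainX.std(axis=0)
    std[std == 0] = 1.0  # guard against constant features
    return [(trainX - mean) / std, (testX - mean) / std]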
Example 2
import copy

import torch.nn as nn

# Assumed project-local modules: norm (Normalization), cl (ContentLoss), sl (StyleLoss).
# content_layers_default and style_layers_default are assumed module-level defaults.


def get_style_model_and_losses(device,
                               cnn,
                               normalization_mean,
                               normalization_std,
                               style_img,
                               content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization = norm.Normalization(normalization_mean,
                                       normalization_std).to(device)

    # just in order to have iterable access to the lists of content/style
    # losses
    content_losses = []
    style_losses = []

    # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
    # to put in modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = cl.ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = sl.StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], cl.ContentLoss) or isinstance(
                model[i], sl.StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
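The norm.Normalization module used at the top of this function is not included in the snippet. A minimal sketch consistent with how it is used here (an nn.Module that standardizes an image tensor with the given channel-wise mean and std, so it can sit first in the nn.Sequential) could be:

import torch
import torch.nn as nn

class Normalization(nn.Module):
    # Store mean/std as (C, 1, 1) tensors so they broadcast over
    # image batches of shape (B, C, H, W).
    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        return (img - self.mean) / self.std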
Example 3
        # Visualize the radial partition result
        # img = DrawFeature(feature_select1, feature_select2)
        # img_path1 = csv_list_same[i][0].replace('.csv', '.jpg')
        # img_path2 = csv_list_same[i][1].replace('.csv', '.jpg')
        # img.draw(img_path1, img_path2)

        feature_vec1 = FeatureVec(feature_select1.x_fv, feature_select1.y_fv, feature_select1.featuredirect).feature_vec_common
        feature_vec2 = FeatureVec(feature_select2.x_fv, feature_select2.y_fv, feature_select2.featuredirect).feature_vec_common
        l_s1 = len(feature_vec1)
        l_s2 = len(feature_vec2)
        # Skip pairs with fewer than 3 features: radial partitioning needs at least 3 feature points
        if l_s1 < 3 or l_s2 < 3:
            continue

        # Normalize the feature matrices (max and min are assumed to be precomputed bounds from the enclosing scope)
        featurevec1 = Normalization(max, min).normalize(feature_vec1)
        featurevec2 = Normalization(max, min).normalize(feature_vec2)

        # Build every cyclic left shift of the feature index list of the matching shoeprint
        l_s = len(featurevec1)
        matrix = list(range(l_s))
        # index lists produced by each shift (see the roll_list sketch after this example)
        m = roll_list(matrix)
        d_combine = []
        for l in m:
            fv_modify = featurevec2[l, :]
            d = CalcFeatureDistance(featurevec1, fv_modify).distance
            d_combine.append(d)
        # Sort the distances over all rotations and keep the minimum
        d_combine.sort()
        d = d_combine[0]
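The roll_list helper is not defined in this fragment. It is assumed to produce every cyclic left shift of the index list, so the two feature vectors are compared under every rotation; a minimal sketch under that assumption:

def roll_list(indices):
    # Assumed helper: return all cyclic left shifts of the index list,
    # e.g. [0, 1, 2] -> [[0, 1, 2], [1, 2, 0], [2, 0, 1]].
    n = len(indices)
    return [indices[k:] + indices[:k] for k in range(n)]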
Example 4
import DataInit, AddBias, GradientDescent, Normalization, runMachine
import os.path
import numpy as np

BASE = os.path.dirname(os.path.abspath(__file__))

Path = os.path.join(BASE, "DataNew1.csv")
print(Path)

# Path = "..\MachineLearningMF\Data.txt"
'''instantiate helper objects'''
data = DataInit.DataInit()
CostFuntion = runMachine.runMachine()
GDescent = GradientDescent.GradientDescent()
Norm = Normalization.Normalization()
AddBias1 = AddBias.AddBias()
'''load csv'''
data.loader(Path)  # commas removed
'''theta init'''
'''initialized optimal theta 17/9/2019'''
# test entering per-district (dong) theta values
# theta = np.array([[theta0], [theta1], [theta2], [theta3], [theta4]])
theta = np.array([[0], [0], [0], [0], [0]])
'''Normalize'''
data.x, mu, sigma = Norm.featureNormalize(data)
'''Add Bias Column'''
data = AddBias1.addB(data)
'''remove annotations when you compute theta again'''
'''run Gradient descent and Cost function'''
theta = GDescent.runGradient(data, theta, 0.0001, 100000)  # extract the fitted theta values
theta = theta.reshape(1, 5)  # reshape for matrix multiplication
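As a usage note, the fitted theta applies to normalized, bias-augmented inputs. A hypothetical prediction step (not part of the original script, and assuming the mu and sigma returned above cover four raw features) might look like this:

# Illustrative values only; replace with a real sample.
x_new = np.array([1200.0, 3.0, 15.0, 2.0])
x_scaled = (x_new - mu) / sigma             # apply the training-set normalization
x_with_bias = np.hstack(([1.0], x_scaled))  # prepend the bias term
y_pred = (theta @ x_with_bias.reshape(5, 1)).item()
print(y_pred)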