Example #1
def init(img_width,
         centroids=[2, 4, 16, 32, 64, 32, 16, 8],
         video_file=czm.homeFld + "/Dropbox/destin/moving_square.mov",
         temperature=2.0,
         learn_rate=0.1,
         layer_widths=None):
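    """Build a DeSTIN network over the given centroid layout, apply a uniform
    per-layer temperature and a fixed learning rate, and open the video source
    used for training."""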
    global network, layers, video_source, layerMask, top_layer
    layers = len(centroids)
    top_layer = layers - 1
    network = pd.DestinNetworkAlt(img_width, layers, centroids, True, 1,
                                  layer_widths)

    temps = [temperature] * layers

    pd.SetLearningStrat(network.getNetwork(), pd.CLS_FIXED)

    network.setTemperatures(temps)
    network.setFixedLearnRate(learn_rate)
    network.setBeliefTransform(pd.DST_BT_P_NORM)

    layerMask = pd.SWIG_UInt_Array_frompointer(network.getNetwork().layerMask)

    video_source = pd.VideoSource(False, video_file)
    video_source.setSize(img_width, img_width)
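# Example call (illustrative, not from the original script): build the default
# 8-layer network on 512x512 video frames.
# init(pd.W512)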
Example #2
def test_DestinNetworkAlt(self):
    layer_widths = [32, 16, 8, 7, 6, 5, 4, 3, 2, 1]
    centroids =    [2,  8, 32, 32, 64, 16, 16, 16, 16, 4]
    network = pd.DestinNetworkAlt(pd.W512, len(layer_widths), centroids, True, 1,
                                  layer_widths)
    # Layers 0 (32x32) and 1 (16x16) are in the standard 4-to-1 arrangement,
    # so a layer 0 node has exactly one parent.
    node = pd.GetNodeFromDestin(network.getNetwork(), 0, 1, 1)
    self.assertEqual(1, node.nParents)
    # Layer 3 (7x7) is only one node narrower than layer 2 (8x8), so the parent
    # receptive fields overlap and an interior layer 2 node has four parents.
    node = pd.GetNodeFromDestin(network.getNetwork(), 2, 1, 1)
    self.assertEqual(4, node.nParents)
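    # A companion check (a sketch, not part of the original test): with the
    # default quad-tree geometry used elsewhere in these examples, every
    # non-top node would be expected to have exactly one parent.
    # network = pd.DestinNetworkAlt(pd.W512, 8, [2, 4, 8, 16, 32, 16, 8, 4], True)
    # self.assertEqual(1, pd.GetNodeFromDestin(network.getNetwork(), 2, 1, 1).nParents)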
Example #3
def init_destin(siw=pd.W512,
                nLayer=8,
                centroids=[4, 8, 16, 32, 64, 32, 16, 8],
                isUniform=True,
                imageMode=pd.DST_IMG_MODE_GRAYSCALE):
    temp_network = pd.DestinNetworkAlt(siw, nLayer, centroids, isUniform,
                                       imageMode)
    #temp_network.setBeliefTransform(pd.DST_BT_NONE)

    return temp_network
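# Example call (illustrative, not from the original file): a 7-layer network on
# 256x256 input, consistent with the width table in Example #9.
# dn = init_destin(siw=pd.W256, nLayer=7, centroids=[4, 8, 16, 32, 32, 16, 8])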
Example #4
    ims.addImage(czm.homeFld +
                 "/Downloads/destin_toshare/train images/%s.png" % l)

if not batch_mode:
    centroids = [4, 8, 16, 32, 64, 32, 16, len(letters)]
else:
    centroids = [start_centroids for i in range(7)]
    centroids.append(len(letters))
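# For reference (illustrative values: letters == "ABCDE", start_centroids == 16),
# the two branches above yield [4, 8, 16, 32, 64, 32, 16, 5] and
# [16, 16, 16, 16, 16, 16, 16, 5] respectively.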

layers = len(centroids)
top_layer = layers - 1
draw_layer = top_layer
iterations = 3000
#image_mode = pd.DST_IMG_MODE_RGB
image_mode = pd.DST_IMG_MODE_GRAYSCALE
dn = pd.DestinNetworkAlt(pd.W512, layers, centroids, True, None, image_mode)
dn.setFixedLearnRate(.1)
dn.setBeliefTransform(pd.DST_BT_NONE)
pd.SetLearningStrat(dn.getNetwork(), pd.CLS_FIXED)

if batch_mode:
    dn.setFrequencyCoefficients(freq_coeff, freq_treshold, add_coeff)
    dn.setStarvationCoefficient(starv_coeff)
    dn.setMaximumCentroidCounts(max_centroids)

#dn.setBeliefTransform(pd.DST_BT_P_NORM)
#ut=1.5
#dn.setTemperatures([ut,ut,ut,ut,ut,ut,ut,ut])

weight_exponent = 4
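# (Inference, since the rest of the file is not shown: weight_exponent is
# presumably passed to dn.setCentImgWeightExponent(), as done explicitly in
# Example #6.)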
Example #5
File: som.py Project: yantrabuddhi/destin
cifar_batch = 1  #which CIFAR batch to use from 1 to 5
cs = pd.CifarSource(cifar_dir, cifar_batch)

# must have 4 layers because the CIFAR data is 32x32
layers = 4
centroids = [7, 5, 5, 5]

# How many CIFAR images to train DeSTIN with. If this is larger than the
# number of available CIFAR images then some images will be repeated.
training_iterations = 20000

som_train_iterations = 10000

is_uniform = True  # uniform DeSTIN or not
dn = pd.DestinNetworkAlt(pd.W32, layers, centroids, is_uniform)

# I turned off using previous beliefs in DeSTIN because I don't
# think they would be useful in evaluating static images.
dn.setParentBeliefDamping(0)
dn.setPreviousBeliefDamping(0)

# The SOM trains on concatenated beliefs from this layer up to the top layer.
# If bottom_belief_layer = 0 then beliefs from all layers are used.
# If bottom_belief_layer = 3 then only the top layer's beliefs are used.
bottom_belief_layer = 2

# BeliefExporter - picks which beliefs from destin to show to the SOM
be = pd.BeliefExporter(dn, bottom_belief_layer)
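# Rough size check (a sketch, not in the original file): with 32x32 input the
# node grids per layer are 8x8, 4x4, 2x2 and 1x1, so exporting from layer 2
# upward concatenates 4*5 + 1*5 = 25 belief values per image.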

# How many times in a row an individual CIFAR image should be shown to DeSTIN in one training iteration
Example #6
        g.write("tree #%i:\n%s\n" % (t, tm.getMinedTreeStructureAsString(t)))

    f.close()
    g.close()

    shutil.copy(dst_save_file, out_dir + "network_save.bin")

    for i, l in enumerate(letters):
        shutil.copy("%s%s.png" % (img_path, l),
                    out_dir + "input_img_%i.png" % (i))


## Init destin
centroids = [2, 2, 4, 8, 32, 16, 8, 3]
layers = len(centroids)
dn = pd.DestinNetworkAlt(pd.W512, layers, centroids, True)
dn.setBeliefTransform(pd.DST_BT_P_NORM)
#dn.setBeliefTransform(pd.DST_BT_NONE)
#dn.setBeliefTransform(pd.DST_BT_BOLTZ)

uniform_temp = 2
temperatures = [uniform_temp] * layers
#temperatures = [5, 5, 10, 20, 40, 20, 16, 6]

dn.setTemperatures(temperatures)
dn.setIsPOSTraining(True)
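# (Assumptions, not stated in the original script: the P-norm belief transform
# presumably uses the per-layer temperatures set above to sharpen the belief
# distributions, and setIsPOSTraining(True) keeps centroid learning enabled;
# setting it to False would freeze the centroids for evaluation.)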

dn.setCentImgWeightExponent(4)
Example #7
letters = "ABCDE"
for l in letters:
    ims.addImage(czm.homeFld +
                 "/Downloads/destin_toshare/train images/%s.png" % l)

if not batch_mode:
    centroids = [4, 8, 16, 32, 64, 32, 16, len(letters)]
else:
    centroids = [start_centroids for i in range(7)]
    centroids.append(len(letters))

layers = len(centroids)
top_layer = layers - 1
draw_layer = top_layer
iterations = 3000
dn = pd.DestinNetworkAlt(pd.W512, 8, centroids, True)
dn.setFixedLearnRate(.1)
dn.setBeliefTransform(pd.DST_BT_NONE)
pd.SetLearningStrat(dn.getNetwork(), pd.CLS_FIXED)

if batch_mode:
    dn.setFrequencyCoefficients(freq_coeff, freq_treshold, add_coeff)
    dn.setStarvationCoefficient(starv_coeff)
    dn.setMaximumCentroidCounts(max_centroids)

#dn.setBeliefTransform(pd.DST_BT_P_NORM)
#ut=1.5
#dn.setTemperatures([ut,ut,ut,ut,ut,ut,ut,ut])

weight_exponent = 4
Example #8
def test_tree_sizes(self):
    layer_widths = [32, 16, 8, 7, 6, 5, 4, 3, 2, 1]
    centroids =    [2,  8, 32, 32, 64, 16, 16, 16, 16, 4]
    network = pd.DestinNetworkAlt(pd.W512, len(layer_widths), centroids, True, 1,
                                  layer_widths)
    dtm = pd.DestinTreeManager(network, 0)
    print("Size is: " + str(dtm.getWinningCentroidTreeSize()))
Example #9
layers = len(centroids)
top_layer = layers - 1

layers_to_enum = {
        1: pd.W4,
        2: pd.W8,
        3: pd.W16,
        4: pd.W32,
        5: pd.W64,
        6: pd.W128,
        7: pd.W256,
        8: pd.W512}

img_width = layers_to_enum[layers]
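# The table above is just img_width = 4 * 2**(layers - 1): the bottom layer
# works on 4x4 pixel patches and each additional layer doubles the side length.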

dn = pd.DestinNetworkAlt(img_width, layers, centroids, True)

dn.setBeliefTransform(pd.DST_BT_P_NORM)

ct = 2.0
dn.setTemperatures([ct, ct, ct, ct, ct, ct, ct, ct])
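# (Note: this hard-codes eight temperatures; [ct] * layers would track
# len(centroids) automatically. The excerpt does not show centroids, so eight
# layers is assumed here.)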
dn.setFixedLearnRate(.1)


top_node = dn.getNode(top_layer, 0, 0)

vs = pd.VideoSource(False, "hand.m4v")
vs.setSize(img_width, img_width)
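# (Assumption: setSize presumably rescales incoming frames to the square
# input width expected by the network.)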

vs.enableDisplayWindow()
Example #10
def init_destin(siw=pd.W512, nLayer=8, centroids=[4, 8, 16, 32, 64, 32, 16, 8],
                isUniform=True, extRatio=1):
    temp_network = pd.DestinNetworkAlt(siw, nLayer, centroids, isUniform, extRatio)
    #temp_network.setBeliefTransform(pd.DST_BT_NONE)
    return temp_network