# NOTE(review): this fragment arrived with all newlines stripped (extraction
# artifact). Reformatted below; logic is unchanged except where marked.
# Expects `allbigramfeatures`, `trainlabels`, `numtrain`, `numvali`,
# `numclasses`, and the `logreg` module to be defined earlier in the file.

print("done")  # parenthesized form works under both Python 2 and 3

# Stack the per-image feature rows into matrices and split them:
# first numtrain rows -> train, next numvali -> validation, rest -> test.
alltrainfeatures = numpy.vstack(allbigramfeatures[:numtrain + numvali])
testfeatures = numpy.vstack(allbigramfeatures[numtrain + numvali:])
trainfeatures = alltrainfeatures[:numtrain]
valifeatures = alltrainfeatures[numtrain:]

# Keep the full (train+vali) label array, then split labels the same way.
alltrainlabels = trainlabels
valilabels = trainlabels[numtrain:]
trainlabels = trainlabels[:numtrain]

# The stacked arrays supersede the per-image list; free it to cut peak memory.
del allbigramfeatures

#CLASSIFICATION
# Candidate L2 weight costs, scored by zero-one error on the validation set.
#weightcosts = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.0]
weightcosts = [0.01, 0.001, 0.0001, 0.0]
valicosts = []

# Warm-start with gradient steps, then refine with conjugate gradient;
# logreg expects features/labels transposed (one column per example).
lr = logreg.Logreg(numclasses, trainfeatures.shape[1])
lr.train(trainfeatures.T, trainlabels.T, numsteps=100, verbose=False,
         weightcost=weightcosts[0])
lr.train_cg(trainfeatures.T, trainlabels.T, weightcost=weightcosts[0],
            maxnumlinesearch=100)
valicosts.append(lr.zeroone(valifeatures.T, valilabels.T))

for wcost in weightcosts[1:]:
    # NOTE(review): the source fragment was truncated inside this call after
    # `verbose=False,`; `weightcost=wcost)` restores the only plausible
    # completion (it mirrors the identical call above). Any statements that
    # followed in the original loop body (presumably train_cg + zeroone, as
    # above) are lost from this view.
    lr.train(trainfeatures.T, trainlabels.T, numsteps=100, verbose=False,
             weightcost=wcost)
# NOTE(review): newlines were stripped from this fragment during extraction,
# so everything below sits on one (syntactically invalid) physical line.
# It contains two unrelated pieces:
#   (1) the tail of a pooling function -- a per-section mean over
#       `feature_map` written into a slice of `pooled_features`, an optional
#       flattening reshape when the input held a single image
#       (`array_im.shape[0] == 1`), and `return pooled_features.T` -- whose
#       `def` line and enclosing loops over `i_v`/`i_h` are outside this view;
#   (2) top-level evaluation code: a logistic regression with 7 classes whose
#       weights/biases are loaded from "weights_submitted.txt" /
#       "biases_submitted.txt" in `params_dir`, followed by loading every
#       image under target_dir/<clip_id>/ cropped to
#       [v_min:v_max, h_min:h_max] into the `test` array.
# The final statement `test_features = zeros(` is truncated mid-call.
# Restore the original line structure before attempting any code change here.
pooled_features[:, (i_v * h_sections + i_h) * num_centroids:(i_v * h_sections + i_h + 1) * num_centroids] = mean( feature_map[:, i_v * h_sections + i_h], 1) if array_im.shape[0] == 1: pooled_features = reshape(pooled_features, (v_sections * h_sections * num_centroids)) return pooled_features.T numclasses = 7 wc = 1e-3 lr = logreg.Logreg(numclasses, v_sections * h_sections * num_centroids) lr.weights = loadtxt(os.path.join(params_dir, "weights_submitted.txt")) lr.biases = loadtxt(os.path.join(params_dir, "biases_submitted.txt")) test = [] #for j in sorted(os.listdir(target_dir)): for j in clip_ids: for k in sorted(os.listdir(os.path.join(target_dir, j))): test.append( imread(os.path.join(target_dir, j, k))[v_min:v_max, h_min:h_max]) test = asarray(test) num_test_images = shape(test)[0] test_features = zeros(