Example #1
settingsDict = {'Time Delay': CRPtimeDelays,
                'dA Learning Rate': learningRates,
                'dA Batch Size': batchSizes,
                'dA Num Hidden Units': numHiddens,
                'dA Corruption Level': corruptionLevels}
                
numRuns = 0
deleteNCDPickleFiles()
existingNCDs = None
processPool = Pool(numProcesses)

iteration = 0
stopRunningAt = datetime(2015, 8, 18, 10)

opt = Optimiser(settingsDict,
                oldResultsDataFrame=None,
                resultsColumn='Mean Average Precision',
                noImprovementStoppingRounds=None)

currentDateTime = datetime.now()

while currentDateTime < stopRunningAt:
    
    nextSettings = True
    iteration += 1

    while nextSettings is not None and currentDateTime < stopRunningAt:
        nextSettings = opt.getNextSettings()
        if nextSettings is not None:
            for setting in nextSettings:
                # Create CRPs and NCDs
                # load weights if this is for a neural net run
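The snippet breaks off inside the inner loop, so the code that evaluates each settings combination and refreshes currentDateTime is missing. Below is a self-contained sketch of the same time-bounded search pattern, with a plain exhaustive grid walk standing in for the Optimiser's getNextSettings loop and a hypothetical evaluateSettings in place of the CRP/NCD scoring; none of these names come from the source.

from datetime import datetime, timedelta
from itertools import product

# Hypothetical stand-in for the work the truncated loop body does
# (building CRPs and NCDs for one settings combination and scoring it).
def evaluateSettings(settings):
    return len(str(settings)) % 7 / 10.0

settingsDict = {'Time Delay': [1, 2, 4],
                'dA Learning Rate': [0.01, 0.1]}

stopRunningAt = datetime.now() + timedelta(minutes=5)
results = []

# Walk every combination in the settings grid until it is exhausted or
# the deadline passes; the clock is re-read on every pass, which is the
# step the truncated snippet's inner loop needs but does not show.
for values in product(*settingsDict.values()):
    if datetime.now() >= stopRunningAt:
        break
    settings = dict(zip(settingsDict.keys(), values))
    results.append((settings, evaluateSettings(settings)))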
Example #2
settingsDict = {
    'FENS Quantisation Weight 3': FENSquantisationWeights3,
    'FENS Quantisation Weight 4': FENSquantisationWeights4
}

if NNtype is not None:
    settingsDict['dA Num Hidden Units'] = numHiddens
    settingsDict['dA Num Visible Units'] = numVisibles
    settingsDict['dA Corruption Level'] = corruptionLevels

# Initialise
numRuns = 0
iteration = 2
processPool = Pool(numProcesses)
opt = Optimiser(settingsDict,
                oldResultsDataFrame=None,
                resultsColumn='Mean Average Precision',
                noImprovementStoppingRounds=None,
                floatRounding=4)

featureFileDict = None
FENSfeatureFileDict = None
currentDateTime = datetime.now()

# Load base features
piecesPath = FFP.getRootPath(baseFeatureName)
pieceIds = getFolderNames(piecesPath,
                          contains='mazurka',
                          orderAlphabetically=True)[:numFolders]
print('Loading feature file dict...')
featureFileDict = FFP.loadFeatureFileDictAllFolders(piecesPath, pieceIds,
                                                    baseFeatureName,
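getFolderNames and the FFP helpers belong to the project and are not shown. As a rough sketch of what a getFolderNames-style helper might look like, assuming it filters the immediate subfolders of a path by substring:

import os

# Minimal sketch of a getFolderNames-style helper (the project's real
# implementation is not shown): list the immediate subfolders of a path
# whose names contain a substring, optionally sorted alphabetically.
def getFolderNames(path, contains='', orderAlphabetically=False):
    names = [name for name in os.listdir(path)
             if os.path.isdir(os.path.join(path, name)) and contains in name]
    return sorted(names) if orderAlphabetically else names

Under that assumption, getFolderNames(piecesPath, contains='mazurka', orderAlphabetically=True)[:numFolders] yields the first numFolders folders whose names mention 'mazurka', in alphabetical order.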
Example #3
parser.add_argument('-o', '--output', default=None)

args = parser.parse_args()


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))


new_tmpdir = tempfile.mkdtemp(prefix='tmpwrap_mgp_', dir=args.tmpdir)

c = Collection(input_dir=args.input_dir,
               compression=args.compression,
               file_format=args.format,
               datatype=args.datatype,
               tmpdir=new_tmpdir)

o = Optimiser(args.nclusters, c)
o.optimise(max_iter=500,
           nreassign=args.nreassign,
           sample_size=args.sample_size)

output_name = args.output or 'output_' + id_generator(6)
output_fh = open(output_name, 'w+')
headings = ['Iteration', 'CPU Time', 'Likelihood', 'Partition']
output = [[i] + x for i, x in enumerate(o.Scorer.history)]

writer = csv.writer(output_fh, delimiter='\t', quoting=csv.QUOTE_NONE)
writer.writerow(headings)
writer.writerows(output)
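Because the writer is configured with a tab delimiter and a single heading row, the history file is easy to load back for inspection. A small sketch, with an illustrative file name in place of the randomly suffixed default:

import csv

# Hypothetical file name: the script defaults to 'output_' plus a random
# six-character suffix when -o/--output is not supplied.
output_name = 'output_ABC123'

with open(output_name) as fh:
    reader = csv.reader(fh, delimiter='\t')
    headings = next(reader)  # ['Iteration', 'CPU Time', 'Likelihood', 'Partition']
    for row in reader:
        print(dict(zip(headings, row)))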
Example #4
File: opt_run.py Project: mgperry/treeCl
parser.add_argument('-c', '--compression', default=None)
parser.add_argument('-t', '--tmpdir', default='/tmp/')
# Collect all args for optimise and parse them later?
parser.add_argument('-r', '--nreassign', default=10, type=int)
parser.add_argument('-s', '--sample_size', default=10, type=int)
parser.add_argument('-o', '--output', default=None)

args = parser.parse_args()


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

new_tmpdir = tempfile.mkdtemp(prefix='tmpwrap_mgp_', dir=args.tmpdir)

c = Collection(input_dir=args.input_dir,
               compression=args.compression, file_format=args.format, datatype=args.datatype,
               tmpdir=new_tmpdir)

o = Optimiser(args.nclusters, c)
o.optimise(max_iter=500, nreassign=args.nreassign, sample_size=args.sample_size)

output_name = args.output or 'output_' + id_generator(6)
output_fh = open(output_name, 'w+')
headings = ['Iteration', 'CPU Time', 'Likelihood', 'Partition']
output = [[i] + x for i, x in enumerate(o.Scorer.history)]

writer = csv.writer(output_fh, delimiter='\t', quoting=csv.QUOTE_NONE)
writer.writerow(headings)
writer.writerows(output)
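Both variants of the script create a scratch directory under args.tmpdir with tempfile.mkdtemp but never delete it. If cleanup matters, one option (a sketch, not part of the original script) is to wrap the run in try/finally:

import shutil
import tempfile

new_tmpdir = tempfile.mkdtemp(prefix='tmpwrap_mgp_', dir='/tmp/')
try:
    # ... build the Collection, run the Optimiser, write the results ...
    pass
finally:
    # Remove the scratch directory even if the optimisation raises.
    shutil.rmtree(new_tmpdir, ignore_errors=True)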
Example #5
#!/usr/bin/env python3

from utils import read_matrix, read_data
from optimiser import Optimiser

# Read the data: one number (float) per line, so line 0 holds the value for vertex 0, and so on
data = read_data("data.txt")

# Read an n x n adjacency matrix, each row on its own line with entries separated by commas
W = read_matrix("adj_matrix.txt")

# Create an Optimiser object
opt = Optimiser(W, data)

# Call the optimiser with the selected parameters, returning the optimal graph
optimal_graph = opt.iterative_opt(remove=True, add=True, remove_first=False)
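The comments pin down the two input formats, but the example ships no data. Purely for illustration, a toy three-vertex pair of files matching those formats could be generated like this; the values are invented:

# Write toy inputs in the formats the comments describe. data.txt holds
# one float per line (line i is the value for vertex i); adj_matrix.txt
# holds an n x n matrix, one comma-separated row per line. The numbers
# below are made up for illustration only.
with open("data.txt", "w") as f:
    f.write("0.5\n1.2\n-0.3\n")

with open("adj_matrix.txt", "w") as f:
    f.write("0,1,0\n1,0,1\n0,1,0\n")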