Exemple #1
0
		'debugMode': False, # whether to record the debug info
		'parallelMode': True # whether to leverage multiprocessing for speedup
		}

initConfig(para)
#########################################################


# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('ADF: [Wu et al., TSMC\'2013].')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataMatrix, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, density, para)

logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
                          time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #2
0
# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('Approach: HMF [He et al., ICWS\'2014].')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# get the location groups for users as well as for services
locGroup = dataloader.getLocGroup(para)
logger.info('Clustering done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataMatrix, locGroup, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, locGroup, density, para)

logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
                          time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')



Exemple #3
0
    'parallelMode': True  # whether to leverage multiprocessing for speedup
}

initConfig(para)
#########################################################

# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('CLUS: [Silic et al., FSE\'2013].')

# load the dataset
dataTensor = dataloader.load(para)
logger.info('Loading data done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataTensor, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataTensor, density, para)

logger.info(
    time.strftime(
        'All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
        time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #4
0
    'parallelMode': True  # whether to leverage multiprocessing for speedup
}

initConfig(para)
#########################################################

# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('NMF: Non-negative Matrix Factorization.')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataMatrix, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, density, para)

logger.info(
    time.strftime(
        'All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
        time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #5
0
		'outPath': 'result/',
		'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NPRE'], # delete where appropriate		
		'density': np.arange(0.05, 0.31, 0.05), # matrix density
		'rounds': 20, # how many runs are performed at each matrix density
		'dimension': 10, # dimenisionality of the latent factors
		'etaInit': 0.001, # inital learning rate. We use line search
						 # to find the best eta at each iteration
		'lambda': 200, # regularization parameter
		'maxIter': 300, # the max iterations
		'saveTimeInfo': False, # whether to keep track of the running time
		'saveLog': True, # whether to save log into file
		'debugMode': False, # whether to record the debug info
        'parallelMode': True # whether to leverage multiprocessing for speedup
		}


startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('PMF: Probabilistic Matrix Factorization')

# load the dataset
dataTensor = dataloader.load(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataTensor, para)

# fixed typo: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
 
# parameter config area
para = {'dataPath': '../data/', # data path
        'dataName': 'Orangelab_sense_temperature', # set the dataset name
        'outPath': 'result/', # output path for results
        'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NNPRE', 'SNR'], # evaluation metrics
        'samplingRate': np.arange(0.05, 0.96, 0.05), # sampling rate
        'rounds': 1, # how many runs to perform at each sampling rate
        'lmbda': 1e-5, # sparsity regularization parameter
        'trainingPeriod': 33, # training time periods
        'saveTimeInfo': False, # whether to keep track of the running time
        'saveLog': False, # whether to save log into file
        'debugMode': False, # whether to record the debug info
        'parallelMode': False # whether to leverage multiprocessing for speedup
        }

startTime = time.time() # start timing
utils.setConfig(para) # set configuration
logger.info('==============================================')
logger.info('CS-PCA: [Quer et al., TWC\'2012]')

# load the dataset
dataMatrix = dataloader.load(para)

# evaluate compressive monitoring algorithm
evaluator.execute(dataMatrix, para)

# fixed typo: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime)) # end timing
logger.info('==============================================')

Exemple #7
0
# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('Approach: LoRec [Chen et al, TPDS\'2014].')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# get the initial user regions
initUserRegion = dataloader.getInitalRegion(para)
logger.info('Create initial user regions done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute,
                         (dataMatrix, initUserRegion, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, initUserRegion, density, para)

logger.info(
    time.strftime(
        'All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
        time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #8
0
    'dataPath': '../../../data/',
    'dataName': 'dataset#2',
    'dataType': 'rt',  # set the dataType as 'rt' or 'tp'
    'outPath': 'result/',
    'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE',
                'NPRE'],  # delete where appropriate		
    'density': np.arange(0.05, 0.31, 0.05),  # matrix density
    'rounds': 20,  # how many runs are performed at each matrix density
    'topK': 10,  # the parameter of TopK similar users or services
    'lambda': 0.8,  # the combination coefficient of UPCC and IPCC
    'saveTimeInfo': False,  # whether to keep track of the running time
    'saveLog': True,  # whether to save log into file
    'debugMode': False,  # whether to record the debug info
    'parallelMode': True  # whether to leverage multiprocessing for speedup
}

startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('Approach: [UPCC, IPCC, UIPCC][TSC 2011]')

# load the dataset
dataTensor = dataloader.load(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataTensor, para)

# fixed typo: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' +
            utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
Exemple #9
0
# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info("==============================================")
logger.info("Approach: HMF [He et al., ICWS'2014].")

# load the dataset
dataMatrix = dataloader.load(para)
logger.info("Loading data done.")

# get the location groups for users as well as for services
locGroup = dataloader.getLocGroup(para)
logger.info("Clustering done.")

# run the evaluation once for each matrix density configured in para["density"]
if para["parallelMode"]:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para["density"]:
        pool.apply_async(evaluator.execute, (dataMatrix, locGroup, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para["density"]:
        evaluator.execute(dataMatrix, locGroup, density, para)

logger.info(
    time.strftime(
        "All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.", time.gmtime(time.perf_counter() - startTime)
    )
)
logger.info("==============================================")
sys.path.remove("src")
Exemple #10
0
    'outPath': 'result/',
    'metrics': ['NDCG', 'Precision'],  # delete where appropriate
    'metric_parameter': [1, 5, 10, 50, 100],
    'density': [0.01, 0.1, 0.3],  # matrix density
    'rounds': 10,  # how many runs are performed at each matrix density
    'dimension': 10,  # dimenisionality of the latent factors
    'etaInit': 0.01,  # inital learning rate. We use line search
    # to find the best eta at each iteration
    'lambda': 0.1,  # regularization parameter
    'maxIter': 300,  # the max iterations
    'saveTimeInfo': False,  # whether to keep track of the running time
    'saveLog': True,  # whether to save log into file
    'debugMode': False,  # whether to record the debug info
    'parallelMode': True  # whether to leverage multiprocessing for speedup
}

startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('PMF: Probabilistic Matrix Factorization')

# load the dataset
dataMatrix = dataloader.load(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataMatrix, para)

# fixed typo: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' +
            utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
Exemple #11
0
initConfig(para)
#########################################################


# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('Approach: LN_LFM [Yu et al., SCC\'2014]')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# load the service location information
wsInfoList = dataloader.loadServInfo(para)
logger.info('Loading service location information done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataMatrix, wsInfoList, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, wsInfoList, density, para)

logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
                          time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #12
0
#########################################################


# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('Approach: RegionKNN [Chen et al, ICWS\'2010].')

# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')

# get the initial user regions
initUserRegion = dataloader.getInitalRegion(para)
logger.info('Create initial user regions done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataMatrix, initUserRegion, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataMatrix, initUserRegion, density, para)

logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
                          time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #13
0
def execute(code):
    """Run *code* end to end: tokenize it, parse the token stream, and
    evaluate the resulting parse tree, returning the evaluation result."""
    tokens = tokenizer.tokenize(code)
    tree = parser.parser(tokens)
    return evaluator.execute(tree)
Exemple #14
0
		'debugMode': False, # whether to record the debug info
        'parallelMode': True # whether to leverage multiprocessing for speedup
		}

initConfig(para)
#########################################################


# time.clock() was removed in Python 3.8; perf_counter() is the documented replacement
startTime = time.perf_counter()  # start timing
logger.info('==============================================')
logger.info('Baseline approach.')

# load the dataset
dataTensor = dataloader.load(para)
logger.info('Loading data done.')

# run the evaluation once for each matrix density configured in para['density']
if para['parallelMode']:  # run on multiple processes
    pool = multiprocessing.Pool()
    for density in para['density']:
        pool.apply_async(evaluator.execute, (dataTensor, density, para))
    pool.close()
    pool.join()
else:  # run on a single process
    for density in para['density']:
        evaluator.execute(dataTensor, density, para)

logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
                          time.gmtime(time.perf_counter() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
Exemple #15
0
        'rounds': 20, # how many runs are performed at each matrix density
        'dimension': 10, # dimenisionality of the latent factors
		'eta': 0.0001, # learning rate
		'alpha': 0.6, # the combination coefficient
		'lambda': 5, # regularization parameter
        'maxIter': 300, # the max iterations
        'saveTimeInfo': False, # whether to keep track of the running time
        'saveLog': True, # whether to save log into file
        'debugMode': False, # whether to record the debug info
        'parallelMode': True # whether to leverage multiprocessing for speedup
        }


startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('LN-LFM: Latent Neighbor and Latent Factor Model')

# load the dataset
dataMatrix = dataloader.load(para)

# load the service location information
wsInfoList = dataloader.loadServInfo(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataMatrix, wsInfoList, para)

# fixed typo: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')