Example #1
def clickedStartButton(self):
    field = self.fieldSize.text()
    bomb = self.bombQuantity.text()
    if not (field.isdigit() and bomb.isdigit()):
        Miner_error.main(u'You entered invalid parameters for the game. Please try again')
    elif int(field) ** 2 <= 2 * int(bomb):
        Miner_error.main(u'The number of bombs exceeds 50% of the cells on the given field')
    else:
        Miner.main(field, bomb)
Example #2
def load_profiles(miner: Miner, sub_directory: str = "profile"):
    current_directory = os.path.join(miner.root, sub_directory)
    if not os.path.exists(current_directory):
        os.mkdir(current_directory)

    # create the profiles list before calling this batch profile loading
    df = pd.read_csv(os.path.join(current_directory, "profiles.csv"))
    ids = df['account_id'].values
    for account_id in ids:
        miner.profile_mine(account_id=account_id, timeout=120)
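
The comment above assumes profiles.csv already exists. For reference, a minimal sketch of the file this function expects (the column name comes from the df['account_id'] lookup above; the values are hypothetical):

    # <miner.root>/profile/profiles.csv
    account_id
    1000096
    1000097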
Example #3
def stopMiner():
    global tMS, tNF
    Miner.StopAll()
    if tMS: tMS.join()
    if tNF: tNF.join()
    tMS = None
    tNF = None
    return True
Example #4
def run():
    genesis_block_txns = [genesisState]
    genesis_block_contents = {u'blockNumber': 0, u'parentHash': None, u'txnCount': 1, u'txns': genesis_block_txns}
    genesis_hash = blockchain.hash_me(genesis_block_contents)
    genesis_block = {u'hash': genesis_hash, u'contents': genesis_block_contents}
    genesis_block_string = blockchain.json.dumps(genesis_block, sort_keys=True)

    global genesisChain
    genesisChain = [genesis_block_string]

    print(genesisChain)

    # import and run miner, after genesis creation
    import Miner
    Miner.chain = genesisChain
    Miner.state = genesisState
    Miner.run()
Example #5
    def watch():
        '''Create list of miners, validate them, plot them live.'''
        #  Get miners
        miners = []
        #  First check args
        if args.miner:
            #  Look for given miner in config
            if args.miner in config:
                #  Miner exists in config file, create instance
                new_miner = Miner.Miner(
                    args.miner,  #  Name
                    config[args.miner]['host'],
                    config[args.miner]['port'],
                    Stats.Stats('Claymore json'))
                miners.append(new_miner)
                print('{}: Miner set {}'.format(__name__, str(new_miner)))
            else:
                sys.exit('Miner not in config.')
        #  Second check config
        elif int(config['GENERAL']['miners']) > 0:
            #  Get miners from config
            num_miners = int(config['GENERAL']['miners'])
            for each_section in config.sections():
                if each_section == 'GENERAL':
                    #  General is not a miner name, skip this section.
                    continue
                #  For each miner section, create an instance.
                new_miner = Miner.Miner(
                    each_section,  #  Name
                    config.get(each_section, 'host'),
                    config.get(each_section, 'port'),
                    Stats.Stats('Claymore json'))
                miners.append(new_miner)
        else:
            #  No miners in config
            print('{}: Miner not set. Add one to config.'.format(__name__))

        # TODO: Validate miners; if they're inactive, remove them from the list

        watcher = Watcher.Watcher(miners)
        names = [miner.name for miner in miners]
        print('Plotting {} stats every {}s.'.format(', '.join(names), inter))
        plotter.plot_live(watcher)
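
For reference, a minimal sketch of the config file this watcher reads (section and key names are inferred from the lookups above; hosts and ports are hypothetical):

    [GENERAL]
    miners = 2

    [rig0]
    host = 192.168.0.10
    port = 3333

    [rig1]
    host = 192.168.0.11
    port = 3333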
Example #6
def stopMiner():
    global tMS, tNF

    # Stop nonceFinder
    # Stop minerServer
    Miner.StopAll()
    if tMS: tMS.join()
    if tNF: tNF.join()

    tMS = None
    tNF = None

    return True
Example #7
	def __init__(self, name):
		super(Node, self).__init__()
		self.name = name
		self.mempool = set([])
		self.blockchain = [Block.Block(num_ = 0, data_ = ["Genesis Block"], hashb_ = "None", hashp_ = "None", transactionCount = 1)]
		self.difficulty = 4 
		self.nodeServer = NodeServer.NodeServer(name, 4242, self)
		self.webServer = WebServer.WebServer(name, 4254, self)
		self.client = Client.Client(name)
		self.miner = Miner.Miner(self, self.difficulty)
		self.consenter = Consenter.Consenter(self, self.difficulty)
		self.hosts = set([])
		self.webHosts = set([])
Example #8
def create_miners(relayPorts, numMinersPerRelay):
    """
    Creates miners for the ports in the relayPorts list.
    For each index of relayPorts, the function creates n miners, n being the
    value of numMinersPerRelay at that index.
    """
    MinerList = []
    print("Creating miners:")
    for portIndex, numWallet in enumerate(numMinersPerRelay):
        for _ in range(numWallet):
            print("Miner " + str(len(MinerList)) + " created.")
            password = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(16))
            MinerList.append(Miner.Miner(portIndex, relayPorts[portIndex], password))
    print("")

    return MinerList
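
For illustration, a hypothetical invocation (ports invented):

    # two miners on relay port 5000, three on relay port 5001
    miners = create_miners([5000, 5001], [2, 3])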
Example #9
def start_relays(masterPort, relaysPorts):
    """Start the relays for the simulation."""
    for i, port in enumerate(relaysPorts):
        try:
            threading.Thread(target=Relay.Relay, args=(
                masterPort,
                port,
            )).start()
        except Exception:
            print("Error: unable to start thread " + str(i))
    print('miner 1 starts ...')
    Miner1 = Miner.Miner(1, relaysPorts[0], WalletPassword='******')
    for _ in range(100000):
        diff = Miner1.get_difficulty_from_master()
        print('Current difficulty = ' + str(diff))
        print('Miner 1 does a POW ...')
        Miner1.do_pow()
Example #10
import argparse

import Miner


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--length', action='store', help='board length')
    parser.add_argument('-w', '--width', action='store', help='board width')
    parser.add_argument('-m',
                        '--mines',
                        action='store',
                        dest='setmines',
                        help='number of mines on the board')
    options = parser.parse_args()
    if not options.length:
        options.length = input('enter board length: ')
    if not options.width:
        options.width = input('enter board width: ')
    if not options.setmines:
        options.setmines = input('enter number of mines: ')
    miner = Miner.setmines(length=int(options.length),
                           width=int(options.width),
                           minecount=int(options.setmines))
    miner.draw_board(miner.placemines())
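
For illustration, a hypothetical invocation (the script name is invented; any omitted option is prompted for interactively):

    python minesweeper.py -l 9 -w 9 -m 10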
Example #11
def main_regular():
    print("Main: starts")

    # data_file_name - the file where the articles & categories data are saved
    data_file_name = "./Data/3-Level-DataSet-Final"
    # root_category - the wikipedia root category from which the mining procedure begins
    root_category = "Category:Main_topic_classifications"

    ####################################################################################################################
    # Mining
    # - Data Mining
    # - Statistics
    ####################################################################################################################
    # - Data Mining
    # load_data_from_file - should the data be mined from wikipedia first or loaded directly from the JSON file
    load_data_from_file = True
    # max_level - the max level (inclusive) in the categories hierarchy which will be mined (level 0 is the root level)
    max_level = 3
    miner = Miner(root_category,
                  max_level=max_level,
                  load_from_file=load_data_from_file,
                  file_name=data_file_name,
                  level_1_categories=level_1_categories,
                  level_2_categories=level_2_categories,
                  level_3_categories=level_3_categories)
    data = miner.data

    # - Statistics
    statistics_by_category_file_name = './Data/statistics_by_category.csv'
    statistics_by_level_file_name = './Data/statistics_by_level.csv'
    StatisticsGenerator(data, statistics_by_level_file_name, statistics_by_category_file_name)

    ####################################################################################################################
    # Vectorization:
    ####################################################################################################################
    vectorization_types = [VectorizationType.full_text, VectorizationType.summary, VectorizationType.first_sentence]
    # should_normalize - should the resulting BERT vectors be normalized - this is a MUST if comparing using euclidean distance!
    should_normalize = True
    # should_vectorize - should the articles loaded from the file be vectorized
    should_vectorize = False
    vectorizer = TextVectorizer(data,
                                data_file_name,
                                vectorization_types,
                                should_vectorize,
                                should_normalize)
    data = vectorizer.data

    ####################################################################################################################
    # Data Splitting:
    ####################################################################################################################
    test_and_validation_sizes = 0.15
    data_splitter = DataSplitter(data, test_and_validation_sizes)
    x_test_ids, x_valid_ids = data_splitter.x_test_and_validation_ids

    ####################################################################################################################
    # Vectorization Evaluation:
    # - Parameter Selection
    # - Results Evaluation
    ####################################################################################################################
    # - Parameter Selection
    classifiers = [KNNClassifier(), Id3(), AdaBoost(), RandomForest(), SVM(), KerasNeuralNetwork()]
    # parameter_selection_folder - the folder where the results of the parameter selection process are saved
    parameter_selection_folder = "./Parameter Selection/Selected Parameters/"
    parameter_selector = ParameterSelector(data,
                                           x_test_ids,
                                           x_valid_ids,
                                           classifiers,
                                           vectorization_types,
                                           parameter_selection_folder,
                                           verbose=True,
                                           load_from_file=True)
    selected_parameters = parameter_selector.selected_parameters

    # - Results Evaluation
    # the max level for graph generation (inclusive)
    max_level_for_graphing = 1
    # the folder where the statistical graphs are saved
    graphs_folder = "./Parameter Selection/Graphs/"
    GraphCreation(max_level_for_graphing,
                  data,
                  classifiers,
                  vectorization_types,
                  selected_parameters,
                  parameter_selection_folder,
                  graphs_folder)

    ####################################################################################################################
    # Hierarchical Article Classifier
    ####################################################################################################################
    # the number of best estimator instances that will be used as part of the voting classification procedure
    voting_number_of_estimators = 3
    trained_classifiers_file_name = './trained_classifiers'
    confusion_matrix_file_name = './root_category_confusion_matrix'
    article_classifier = HierarchicalArticleClassifier(selected_parameters,
                                                       data,
                                                       x_test_ids,
                                                       x_valid_ids,
                                                       classifiers,
                                                       vectorization_types,
                                                       voting_number_of_estimators,
                                                       trained_classifiers_file_name,
                                                       load_from_file=True)
    article_classifier.select_and_fit_best_models_for_each_category()
    # The next line generates the statistical results shown in the report for the hierarchical classifier
    article_classifier.evaluate_accuracies_by_level_and_path_length()
    article_classifier.generate_confusion_matrix(confusion_matrix_file_name)

    print("Main: finished")
Example #12
import json
import os

from flask import Flask, render_template

import Block
import Blockchain
import Operator
import Transaction
import Miner
import Node

import ed25519
""" --------------- """
""" --------------- """

uPCoin = Flask(__name__)
blockchain = Blockchain.Blockchain("blockchainDb", "transactionDb")
operator = Operator.Operator('walletDb', blockchain)
miner = Miner.Miner(blockchain, None)
node = Node.Node(os.environ["ip"], os.environ["port"], [], blockchain)
""" Main Page """


@uPCoin.route('/')
def index():
    # TODO: Prettify this page
    return render_template("upCoin.html")


""" Blockchain GET/POST requests """


@uPCoin.route('/blockchain/blocks', methods=['GET'])
def get_blocks():
Example #13
import os
from math import ceil

# Imports assumed for this fragment (module names inferred from usage below).
import Graphics
import Positions
from Miner import Miner

# Making the map.
m_lPos = Positions.Position.pos.keys()
Window = Graphics.GraphWin("Window", 250, 180)
Window.master.geometry("+2200+42")
for name in m_lPos:
    point = Graphics.Point(
        Positions.Position.pos.get(name)[0],
        Positions.Position.pos.get(name)[1])
    Graphics.Text(point, name).draw(Window)

# Making miners.
miners = [
    Miner("Bob", Window),
    Miner("Jonny", Window, 0, 800, 0, 1000),
    Miner("Billy", Window, 1100, 0, 800, 0),
    Miner("Konny", Window, 0, 0, 0, 800, 0, 200)
]

# Update loop (Game loop)
while True:

    os.system('cls')  # clear the console (Windows-specific)
    for miner in miners:
        print(miner.m_Name + " stats: ")
        print("Doing:		" + str(miner.m_Doing))
        print("Location:	" + str(ceil(miner.m_tPos[0])) + "," +
              str(ceil(miner.m_tPos[1])))
Example #14
import datetime
import time
import sys
import os

import Miner

if __name__ == "__main__":
    #version =1
    #data = ["no data"]
    #bits = 440711666
    #print(Miner.createBlock(version,"0000000000000000000000000000000000000000000000000000000000000000",data,bits,4).printBlockInfo())
    # Split the comma-separated block data from the environment variable.
    li = []
    start = 0
    last = 0
    for i in range(0, len(os.environ['datainMining'])):
        if os.environ['datainMining'][i] == ",":
            last = i
            li.append(os.environ['datainMining'][start:last])
            start = last + 1
        elif i == len(os.environ['datainMining']) - 1:
            last = i + 1
            li.append(os.environ['datainMining'][start:last])
    block = Miner.createBlock(int(os.environ['version']),
                              os.environ['previous'], li,
                              int(os.environ['bits']), int(os.environ['zero']))
    print(block.nonce)
    print(block.merkleHash)
    print(block.version)
    print(block.blockHash)
    print(block.time)
    print(block.previousBlockHash)
    print(block.bits)
Example #15
import Miner
import datetime
import time
import sys
import os

if __name__ == "__main__":
    # def checkBlockHash(nonce,version,previous,merkleHash,time,bits,blockHash1)
    if (Miner.checkBlockHash(int(os.environ["nonce"]),
                             int(os.environ["version"]),
                             os.environ["previousBlockHash"],
                             os.environ["merkleHash"], int(os.environ["time"]),
                             int(os.environ["bits"]),
                             os.environ["blockHash"])):
        print "true"
    else:
        print "false"
Example #16
def main():
    # Filters set inside `with warnings.catch_warnings()` are reverted when the
    # block exits, so apply them directly to keep them active for all of main().
    warnings.simplefilter("ignore")
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    preTime = time.time()
    
    LogFile = 'logging_Miner.txt'
    logging.basicConfig(filename=LogFile, level=logging.DEBUG, filemode='w',
                        format='%(filename)s: %(levelname)s: %(funcName)s(): %(lineno)d:\t%(message)s')
    
    logging.info('Start Miner')
    
    readPath = ''
    writePath = ''
    #remember two spaces for yaml file and no tabs
    with open("ET_MinerConfig.yml",'r') as ymlFile:
        config = yaml.load(ymlFile)
        readPath = config['folder']['readpath']
        writePath = config['folder']['writepath']
        logging.info('reading from path: ' + readPath)          
    
    #nltk.download()
    #'C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\hotel_info.csv'
    
#     data = pd.read_csv(Miner.find_most_recent_file(readPath),sep='\t',usecols=['Ad','Description line 1','Description line 2',
#                                                                                'Impressions','Cost','Conversions',
#                                                                                'top_sq_tokens', 'lp_text'])
    data = pd.read_csv(readPath)
    colnames = list(data.columns.values)
    
    #data['ad_text'] = data['Ad'] + data['Description line 1'] + data['Description line 2'] 
    
    #data = data.sort(['lp_text'],ascending=False)
    
    data.columns = Miner.MyFlattten([['hotel_id'],colnames[1:]])
    #data[25:11400].to_csv('C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\ET\\data_text.csv',index=False)
    #sys.exit()
    data = Miner.ReIndex(data)   
   
    #reviewData = pd.read_csv('C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\hotel_review.csv')    
    
    ##############################Freq Dist##################################################################
#     reviewText = getDocuments(data,'lp_text',True, False)
#     
#     totalText = ''
#     for k in xrange(int(len(reviewText)/20)):
#         totalText += str(reviewText[k])
#         
#     reviewText0 = nltk.word_tokenize(totalText)
#     #reviewText0 = Miner.tokenStemmer(reviewText0) #stemmer is not working very well
#     
#     
#     tempTokens = [word for word in reviewText0 if not Miner.punctuationNumbers.search(word)]
#     reviewText = [i.lower() for i in tempTokens if i.lower() not in Miner.stopWords]
#     
#     #Miner.stopWords.difference(set(['hotel','hotels','near'])) # this will effect the Miner file
#     
#     freqDistText = Miner.getFreqDist(reviewText, True) #pickle freqDist
#     
#     #print('frequency plot')
#     #freqDistText[0].plot(30,cumulative=True)    
#     
#     logging.info('Top words: ' )
#     logging.info(freqDistText[1][3:10])    
#     
#     #logging.info('cfDist predictions: ')
#     
#     #############################################word predictions######################################################
#     
#     print('top words')
#     print([i for i,j in freqDistText[1][3:10]])
#     
#     topWords = [i for i,j in freqDistText[1][3:100]]
#     wordsPred = [i for i,j in freqDistText[1][3:10]]
#     
#     print('topWords')
#     print(topWords)
#     print('wordsPred')
#     print(wordsPred)
#     
#     wordsPredictions = Miner.getConditionalDist(reviewText, topWords, wordsPred)
#     
#     logging.info(wordsPredictions)
#     
#     Ngrams = Miner.getNgram(reviewText, zip(wordsPred,wordsPred[::-1]), True)
#     
#     logging.info('Ngrams')
#     logging.info(' ')
#     logging.info(Ngrams[1])
#     
    #combineData = pd.merge(data.ix[:,['hotel_id','tags']], reviewData.ix[:,['hotel_id','title','body']], on=['hotel_id'],how='inner')
    
    #combineData.to_csv('C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\combineData_text.csv')
    
    ###############################Topic Modeling######################################################################    
    topicData = (data[:5000].loc[133, :], 'lp_text')
    print('topicData')
    print(topicData[0]['lp_text'])
    
    lda = Miner.getTopicsLDAandLSA(topicData[0],topicData[1],'lda',True)
    logging.info('lda topics')
    logging.info(lda[0].print_topics(10))
    logging.info('LDA perplexity: ' + str(lda[1]))
     
    lsa = Miner.getTopicsLDAandLSA(topicData[0],topicData[1],'lsa',True)
    logging.info('lsa topics')
    logging.info(lsa.print_topics(10))
    
    textBlockFlag = True
     
    dataText = Miner.getDocuments(topicData[0],topicData[1],True, textBlockFlag)
    print('dataText')
    print(dataText)
    tfidf_searchTerms, modelTfIdf = Miner.tf_idf(dataText)
     
    print('tfidf_searchTerms.T.toarray()')
    print(tfidf_searchTerms.T.toarray()) #word by doc, before transpose doc by word (row by col format)
     
    logging.info('tfidf_searchTerms transposed')
    logging.info(tfidf_searchTerms.T.toarray())
     
    tfidf_review, reviewModelTfIdf = Miner.tf_idf(Miner.getDocuments(data[:5000].loc[133, :], 'lp_text', True, textBlockFlag))
     
    topicsNMF = Miner.getTopicNMF(tfidf_searchTerms, modelTfIdf)
    logging.info('NMF topics')    
     
    #################Similiarity testing###################################################################################
    colNames = ['ad_text','lp_text']    
    
    topicData = (data[:5000], 'lp_text')
#     
#     logging.info('Pairwise similiarity')
#     #logging.info(Miner.similiarity(np.array([1,2,0,1]), np.array([0,2,2,1]), None, None))
#     logging.info(' ')       
#     
    combinePhraseDoc = Miner.CombineDocumentPhrase(data, colNames, True) #takes a long time to compute
     
    tfidf_review, combineModelTfIdf  = Miner.tf_idf(combinePhraseDoc)   
     
    dimReview = tfidf_review.toarray().shape
     
    tfidf_review_matrix = tfidf_review.toarray()    
     
    for i in range(0, int(dimReview[0] / 10), 2):  # loop through in steps of two
        try:                         
            #logging.info('phrase: ' + str(combinePhraseDoc[i]))
            #logging.info('document: ' + str(combinePhraseDoc[i+1]))
            #logging.info('phrase vector: ' )
            #logging.info(tfidf_review_matrix[i,:])
            #logging.info('doc vector: ' )
            #logging.info(tfidf_review_matrix[i+1,:])
            logging.info('similiarity: ' + str(Miner.similiarity(tfidf_review_matrix[i,:], tfidf_review_matrix[i+1,:], None, None)))
        except Exception as e:
            logging.warning('Error: ' + str(e))
Example #17
            #logging.info('phrase: ' + str(combinePhraseDoc[i]))
            #logging.info('document: ' + str(combinePhraseDoc[i+1]))
            #logging.info('phrase vector: ' )
            #logging.info(tfidf_review_matrix[i,:])
            #logging.info('doc vector: ' )
            #logging.info(tfidf_review_matrix[i+1,:])
            logging.info('similiarity: ' + str(Miner.similiarity(tfidf_review_matrix[i,:], tfidf_review_matrix[i+1,:], None, None)))
        except Exception as e:
            logging.warning('Error: ' + str(e))

    logging.info(' ')
    logging.info('Pairwise similiarity')
    logging.info(' ')
    dimCombineData = data.shape

    phrasesText = Miner.getDocuments(data, colNames[0], True)
    documentText = Miner.getDocuments(data, colNames[1], True)

    for j in range(int(dimCombineData[0] / 10)):
        try:
            #logging.info('phrase: ' + str(phrasesText[j]))
            #logging.info('document: ' + str(documentText[j]))
            tfidf_pair, pairTfIdf = Miner.tf_idf([phrasesText[j], documentText[j]])
            tfidf_pair_matrix = tfidf_pair.toarray()
            logging.info('similiarity: ' + str(Miner.similiarity(tfidf_pair_matrix[0,:], tfidf_pair_matrix[1,:], None, None)))
        except Exception as e:
            logging.warning('Error: ' + str(e))

    #logging.info(topicsNMF.)

    ###########################Entity Extraction ###################################################################################
Example #18
#!/usr/bin/python3
import BlockChain
import Block
import Miner
from datetime import datetime

print("Hello world")
b = BlockChain.BlockChain()
m = Miner.Miner(b)
print(b.lastBlock)
m.mineNewBlock({
    "123fdssd3": "1dfds32fds",
    "amount": 0.00231
})
m.mineNewBlock({
    "b23fdssd3": "1dfds32fds",
    "amount": 0.00231
})
m.mineNewBlock({
    "a3fdssd3": "1dfds32fds",
    "amount": 0.00231
})
m.mineNewBlock({
    "fdssd3": "1dfds32fds",
    "amount": 0.00431
})
m.mineNewBlock({
    "c23fdssd3": "1dfds32fds",
    "amount": 0.01231
})
m.mineNewBlock({
Example #19
print(util.splitBigInt(N))
sqs = SQS.SQS(N, g)
sqs.generateSQSKeys()
print(3)
h2 = sqs.getSQSPublicKey()
print(util.splitBigInt(h2))
print(4)
qp = QP.QP(N, g, h2)
print(5)
qp.generateQPKeys()
print(6)
h1 = qp.getQPPublicKey()
print(7)
h = h1 * h2
print(8)
miner = Miner.Miner(N)


def testOne(m):
    par = participant.Participant(g, N, h)
    cm1, cm2 = par.encryption(m)
    print("cm1: ")
    print(cm1)
    print("cm2: ")
    print(cm2)
    cm1prime, cm2prime = sqs.decryptNumberFirst(cm1, cm2)
    print("cm1prime: ")
    print(cm1prime)
    print("cm2prime: ")
    print(cm2prime)
    decryptedM = qp.decryptNumberSecond(cm1prime, cm2prime)
Example #20
#SecondMiner

import threading
import time

import Miner
import Signature
import TxBlock

my_ip = 'localhost'
wallets = [(my_ip, 5005), (my_ip, 5006)]


my_pr, my_pu = Signature.loadKeys("private.key", "public_key")
# args need to be passed as a TUPLE, in t1
t1 = threading.Thread(target=Miner.minerServer, args=(('localhost', 5007),))
t2 = threading.Thread(target=Miner.nonceFinder, args=(wallets, my_pu))


t1.start()
t2.start()
time.sleep(20)
Miner.StopAll()

t1.join()
t2.join()

print(ord(TxBlock.findLongestBlockchain(Miner.head_blocks).previousBlock.previousBlock.nonce[0]))
print(ord(TxBlock.findLongestBlockchain(Miner.head_blocks).previousBlock.nonce[0]))
print(ord(TxBlock.findLongestBlockchain(Miner.head_blocks).nonce[0]))
Example #21
                        choices=["cryptonight"],
                        help="hashing algorithm to use for proof of work")
    parser.add_argument(
        "-o",
        "--url",
        help="stratum mining server url (eg: stratum+tcp://foobar.com:3333)")
    parser.add_argument("-u",
                        "--user",
                        dest="username",
                        default="",
                        help="username for mining server")
    parser.add_argument("-p",
                        "--pass",
                        dest="password",
                        default="",
                        help="password for mining server")
    parser.add_argument('-t',
                        '--thread',
                        default="1",
                        help="Number of mining threads to start")
    parser.add_argument('-d', '--debug', help="show extra debug information")

    options = parser.parse_args(sys.argv[1:])

    if options.debug:
        Utils.DEBUG = True

    miner = Miner(options.url, options.username, options.password,
                  options.algo, int(options.thread))
    miner.serve_forever()
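
For illustration, a hypothetical invocation of this CLI (script name, pool URL, and credentials are placeholders; the algorithm flag is truncated above, so it is omitted here):

    python miner.py -o stratum+tcp://pool.example.com:3333 -u myuser -p mypass -t 4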
Example #22
import Miner

Miner.run()
Example #23
def load_profiles(miner: Miner, sub_directory: str = "profile"):
    current_directory = os.path.join(miner.root, sub_directory)
    if not os.path.exists(current_directory):
        os.mkdir(current_directory)

    # create the profiles list before calling this batch profile loading
    df = pd.read_csv(os.path.join(current_directory, "profiles.csv"))
    ids = df['account_id'].values
    for account_id in ids:
        miner.profile_mine(account_id=account_id, timeout=120)


if __name__ == "__main__":
    # 1. Download change details
    miner = Miner(gerrit=Gerrit.libreoffice, replace=False)

    parameters = Parameters(status=Status.closed,
                            start_index=0,
                            end_index=-1,
                            after='2012-01-01 00:00:00.000000000',
                            before='2013-01-01 00:00:00.000000000',
                            fields=[
                                Field.all_revisions, Field.all_files,
                                Field.messages, Field.detailed_labels
                            ],
                            n_jobs=4,
                            batch_size=100)

    index = 0
    max_retry = 3
Example #24
import Miner
import threading
import Signatures

def Thief(my_addr):
    my_ip, my_port = my_addr
    server = SocketUtils.newServerConnection(my_ip, my_port)
    # Get Txs from wallets
    while not break_now:
        newTx = SocketUtils.recvObj(server)
        if isinstance(newTx, Transactions.Tx):
            for ip, port in miners:
                if not (ip == my_ip and port == my_port):
                    SocketUtils.sendObj(ip, newTx, port)

Miner.saveTxList([], "Txs.dat")

miner_pr, miner_pu = Signatures.generate_keys()
t1 = threading.Thread(target=Miner.minerServer,
                      args=(('localhost', 5005), ))
t2 = threading.Thread(target=Miner.nonceFinder, args=(wallets, miner_pu))
t3 = threading.Thread(target=walletServer, args=(('localhost', 5006), ))
t1.start()
t3.start()

pr1, pu1 = Signatures.loadKeys("private.key", "public.key")
pr2, pu2 = Signatures.generate_keys()
pr3, pu3 = Signatures.generate_keys()

# Query balances
bal1 = getBalance(pu1)
Example #25
		block0 = Block.Block()
		#Initializing the block constituents (previous block and nonce) as 'None' as this is the first block of the chain
		block0.previous_block_hash = None
		block0.Nonce = None
		block0.verified_transactions.append(t0)
		
		#Now, hash the Genesis block and store the value in the global variable
		hashVal = hash(block0)
		global_last_block_hash = hashVal
		print("Adding the Genesis Block to the Blockchain")
		global_GavCoins.append(block0)
	
	elif user_choice == '4':
		clear_screen()
		block = Block.Block()
		miner = Miner.Miner()
		for i in range(3):
			temp_transaction = global_chain_transactions[global_last_transaction_index]
			#TBD - VALIDATE TRANSACTION
			#TBD - BELOW TO BE Added only if the transaction is valid
			block.verified_transactions.append(temp_transaction)
			global_last_transaction_index += 1
		
		block.previous_block_hash = global_last_block_hash
		block.Nonce = miner.mine(block, 2)
		digest = hash(block)
		global_GavCoins.append(block)
		global_last_block_hash = digest
		
	
	elif user_choice == '5':
Example #26
    print("Error: unable to start thread")

# Starting the relays
for i in range(0, len(relayPorts)):
    port = relayPorts[i]
    try:
       threading.Thread(target=Relay.Relay, args=(masterPort, port,)).start()
    except:
        print("Error: unable to start thread " + str(i))
sleep(0.5)

print('Miner1 comes in ...')
password2 = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(16))
Miner1 = Miner.Miner(1, relayPorts[0], password2)
print('Miner1 mines a Block ...')
Miner1.do_pow()
print('Miner1 gets bonus, now Miner 1 has 10 coins: ')
print("Miner1 has: " + str(Miner1.get_wallet_balance()) + " coins.")
print("Liyuan comes in ...")
password1 = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(16))
Liyuan = Wallet.Wallet(relayPorts[0], password1)
print('Liyuan has no money: ')
print("Liyuan has: " + str(Liyuan.determine_wallet_money()) + " coins")
print('Miner1 transacts 5 coins to Liyuan...')
LiyuanAddress = Liyuan.get_address()
Miner1.spend_money_from_wallet(LiyuanAddress, 5)
print('The miner on the same relay as Liyuan mines a new block to carry the new transaction...')
Miner1.do_pow()
print('Now Miner1 has 15 coins:')
Example #27
def main():
    # Apply warning filters directly; a catch_warnings block would revert them on exit.
    warnings.simplefilter("ignore")
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    preTime = time.time()
    
    LogFile = 'logging_Miner.txt'
    logging.basicConfig(filename=LogFile, level=logging.DEBUG, filemode='w',
                        format='%(filename)s: %(levelname)s: %(funcName)s(): %(lineno)d:\t%(message)s')
    
    logging.info('Start Miner')
    
    readPath = ''
    writePath = ''
    #remember two spaces for yaml file and no tabs
    with open("MinerConfig.yml",'r') as ymlFile:
        config = yaml.load(ymlFile)
        readPath = config['folder']['readpath']
        writePath = config['folder']['writepath']
        logging.info('reading from path: ' + readPath)        
    
    #nltk.download()
    #'C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\hotel_info.csv'
    data = pd.read_csv(readPath)
    colnames = list(data.columns.values)
    
    data.columns = Miner.MyFlattten([['hotel_id'],colnames[1:]])
   
    reviewData = pd.read_csv('C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\hotel_review.csv')
    
    
    ##############################Freq Dist##################################################################
    reviewText = Miner.getDocuments(reviewData, 'body', True)  # assuming the Miner.getDocuments used elsewhere in this file
    
    totalText = ''
    for k in range(int(len(reviewText) / 20)):
        totalText += str(reviewText[k])
        
    reviewText0 = nltk.word_tokenize(totalText)
    #reviewText0 = Miner.tokenStemmer(reviewText0) #stemmer is not working very well
    
    Miner.stopWords.update(['hotel', 'hotels', 'near'])
    reviewText = [i.lower() for i in reviewText0 if i.lower() not in Miner.stopWords]

    # set.remove() takes a single element and would raise here; use
    # difference_update() to drop several words at once.
    Miner.stopWords.difference_update(['hotel', 'hotels', 'near'])
    
    freqDistText = Miner.getFreqDist(reviewText, True) #pickle freqDist
    
    #print('frequency plot')
    #freqDistText[0].plot(30,cumulative=True)    
    
    logging.info('Top words: ' )
    logging.info(freqDistText[1][3:10])    
    
    logging.info('cfDist predictions: ')
    
    #############################################word predictions######################################################
    
    print('top words')
    print([i for i,j in freqDistText[1][3:10]])
    
    topWords = [i for i,j in freqDistText[1][3:100]]
    wordsPred = [i for i,j in freqDistText[1][3:10]]
    
    print('topWords')
    print(topWords)
    print('wordsPred')
    print(wordsPred)
    
    wordsPredictions = Miner.getConditionalDist(reviewText, topWords, wordsPred)
    
    logging.info(wordsPredictions)
    
#     Ngrams = Miner.getNgram(reviewText, zip(wordsPred,wordsPred[::-1]), True)
#     
#     logging.info('Ngrams')
#     logging.info(' ')
#     logging.info(Ngrams[1])
#     
    combineData = pd.merge(data.loc[:, ['hotel_id', 'tags']], reviewData.loc[:, ['hotel_id', 'title', 'body']], on=['hotel_id'], how='inner')
    
    combineData.to_csv('C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\combineData_text.csv')
    
    ###############################Topic Modeling######################################################################
#     topicData = (combineData , 'body')
#     lda = Miner.getTopicsLDAandLSA(topicData[0],topicData[1],'lda')
#     logging.info('lda topics')
#     logging.info(lda[0].print_topics(10))
#     logging.info('LDA perplexity: ' + str(lda[1]))
#     
#     lsa = Miner.getTopicsLDAandLSA(topicData[0],topicData[1],'lsa')
#     logging.info('lsa topics')
#     logging.info(lsa.print_topics(10))
#     
#     dataText = Miner.getDocuments(topicData[0],topicData[1],True)
#     tfidf_searchTerms, modelTfIdf = Miner.tf_idf(dataText)
#     
#     print('tfidf_searchTerms.T.toarray()')
#     print(tfidf_searchTerms.T.toarray()) #word by doc, before transpose doc by word (row by col format)
#     
#     logging.info('tfidf_searchTerms transposed')
#     logging.info(tfidf_searchTerms.T.toarray())
#     
#     tfidf_review, reviewModelTfIdf = Miner.tf_idf(Miner.getDocuments(combineData,'body',True))  
#     
#     topicsNMF = Miner.getTopicNMF(tfidf_searchTerms, modelTfIdf)
#     logging.info('NMF topics')    
#     
    #################Similiarity testing###################################################################################
    colNames = ['title','body']    
    
    logging.info('Pairwise similiarity')
    #logging.info(Miner.similiarity(np.array([1,2,0,1]), np.array([0,2,2,1]), None, None))
    logging.info(' ')       
#     
#     combinePhraseDoc = Miner.CombineDocumentPhrase(combineData, colNames, True) #takes a long time to compute
#     
#     tfidf_review, combineModelTfIdf  = Miner.tf_idf(combinePhraseDoc)   
#     
#     dimReview = tfidf_review.toarray().shape
#     
#     tfidf_review_matrix = tfidf_review.toarray()    
#     
#     for i in xrange(0,int(dimReview[0]/10),2): #loop through by twos     
#         try:                         
#             logging.info('phrase: ' + str(combinePhraseDoc[i]))
#             logging.info('document: ' + str(combinePhraseDoc[i+1]))
#             #logging.info('phrase vector: ' )
#             #logging.info(tfidf_review_matrix[i,:])
#             #logging.info('doc vector: ' )
#             #logging.info(tfidf_review_matrix[i+1,:])
#             logging.info('similiarity: ' + str(Miner.similiarity(tfidf_review_matrix[i,:], tfidf_review_matrix[i+1,:], None, None)))
#         except Exception, e:
#             logging.warn('Error: ' + str(e))
#     
#     logging.info(' ')            
#     logging.info('Pairwise similiarity')
#     logging.info(' ')
#     dimCombineData = combineData.shape
#     
#     phrasesText = Miner.getDocuments(combineData,colNames[0],True)
#     documentText = Miner.getDocuments(combineData,colNames[1], True)
#     
#     for j in xrange(int(dimCombineData[0]/10)):
#         try:                
#             logging.info('phrase: ' + str(phrasesText[j]))
#             logging.info('document: ' + str(documentText[j]))
#             tfidf_pair, pairTfIdf = Miner.tf_idf([phrasesText[j],documentText[j]]) 
#             tfidf_pair_matrix = tfidf_pair.toarray()
#             logging.info('similiarity: ' + str(Miner.similiarity(tfidf_pair_matrix[0,:], tfidf_pair_matrix[1,:], None, None)))
#         except Exception, e:
#             logging.warn('Error: ' + str(e))    
#     
    #logging.info(topicsNMF.)
    
    ###########################Entity Extraction ###################################################################################
   
    tagToken = Miner.ExtractTags(combineData[:10],'body')
    Entities = Miner.ExtractEntity(tagToken)
    
    logging.info('compute time: {0}'.format(time.time() - preTime))
    Entities[0].draw()    
    
    RegexEntities = Miner.grammarEntity(tagToken) #takes long
    RegexEntities[0].draw()
   
    logging.info('Entities')
    for entity in Entities:
        logging.info(entity)
        logging.info(' ')
    
    #'C:\\Users\\fhokhold\\Documents\\Projects\\Vulcan\\vulcan-data\\entity_test.csv'
    logging.info('write to path: ' + writePath)
    pd.DataFrame(Entities).to_csv(writePath, index=False)
    
    w2vec = Miner.getTopicWord2VecNeuralNet(data, 'tags')
    logging.info('word2vec features')
    logging.info(w2vec.accuracy())