def main():
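    # Collect the HFS+ special files and journal (from a live disk or from
    # previously dumped files), parse the journal into transaction blocks,
    # and write the parsed blocks and record lists under ./transaction
    # and ./result.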
    
    Utility.DirectoryCleaning('transaction')
    Utility.DirectoryCleaning('result')

    if raw_input('Is disk connected? [y/n]: ').lower()=='y':
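        # Locate the attached HFS+ volume; 'select' controls whether raw image
        # dumps are written during collection, 'recovery' whether deleted files
        # are recovered at the end of the run.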

        disk=Collector.VolumeFinder()

        select = (raw_input('Would you like to create image? [y/n] : ').lower()=='y')
        recovery = (raw_input('Would you like to recover the image? [y/n] : ').lower()=='y')
        
        print 'Collecting files...'
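        # The HFS+ volume header is 512 bytes at byte offset 1024; the DiskDump
        # arguments are taken to mean sector size 512, start sector 2, length 1.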
        volumeHeader=VolumeHeader(Utility.DiskDump(disk,'HFSPlusVolumeHeader',512,2,1,select))

        journal=Collector.JournalExtractor(volumeHeader.journalInfoBlock,disk,volumeHeader.blockSize,select)
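        # The volume header stores the special-file fork data in this order:
        # allocation file, extents overflow file, catalog file.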
        allocationFile=Collector.SpecialFileExtractor('AllocationFile',volumeHeader.specialFileFork[0],disk,volumeHeader.blockSize,select)
        extentsOverflowFile=Collector.SpecialFileExtractor('ExtentsOverflowFile',volumeHeader.specialFileFork[1],disk,volumeHeader.blockSize,select)
        catalogFile=Collector.SpecialFileExtractor('CatalogFile',volumeHeader.specialFileFork[2],disk,volumeHeader.blockSize,select)
    
    else:
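        # Offline mode: read previously dumped structures from files
        # instead of the attached disk.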
        
        path=raw_input('Input path of volume header\n')
        f=open(path,'rb')
        volumeHeader=VolumeHeader(f.read())
        f.close()
        
        path=raw_input('Input path of journal\n')
        f=open(path,'rb')
        journal=f.read()
        f.close()

        if raw_input('Do you have a catalog file dump? [y/n]: ').lower()=='y':
            path=raw_input('Input path of catalog file\n')
            f=open(path,'rb')
            catalogFile=f.read()
            f.close()
        else:
            # No catalog dump was supplied; CatalogFileAnalyzer below is
            # assumed to accept empty data.
            catalogFile=''
        
        """
        if raw_input('Input path of allocation file(y/n)') == 'y':
        path=raw_input()
        f=open(path,'rb')
        AllocationFile=f.read()
        f.close()
        
        if raw_input('Input path of extents overflow file(y/n)\n') == 'y':
        path=raw_input()
        f=open(path,'rb')
        ExtentsOverflowFile=f.read()
        f.close()
        """
        recovery=False

    print 'Analyzing journal...'
    tranBlocks=Analyzer.JournalParser(journal)
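    # tranBlocks[0] holds the number of parsed blocks; the blocks themselves
    # occupy indices 1 through tranBlocks[0].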
    
    print 'Printing transactions...'
    for i in range(1,tranBlocks[0]+1):
        # One dump per journaled block, named by block index, journal offset,
        # and on-disk location (no per-transaction counter is tracked here).
        fName='./transaction/block{0}(journaloffset:{1}_location:{2})'.format(i,tranBlocks[i].journalOffset,tranBlocks[i].offset)
        f=open(fName,'wb')
        f.write(tranBlocks[i].content)
        f.close()

    print 'Analyzing CatalogFile...'
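    # The earlier revision below captured this call's return value
    # (nameAndParent) to resolve record paths; here it is discarded.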
    Analyzer.CatalogFileAnalyzer(catalogFile)

    print 'Analyzing transactions...'
    Analyzer.TransactionAnalyzer(tranBlocks,volumeHeader.specialFileFork,volumeHeader.blockSize)

    print 'Printing record list...'
    fName='./result/recordList.csv'
    f=open(fName,'w')
    for i in range(1,tranBlocks[0]+1):

        # blockType values 1-3 are taken to mark B-tree node blocks whose
        # records can be listed individually; other blocks are written whole.
        if 0<tranBlocks[i].blockType[0]<4:

            for j in range(tranBlocks[i].content.numRecords):

                f.write(str(tranBlocks[i]))
                f.write(str(tranBlocks[i].content.records[j]))

        else:

            f.write(str(tranBlocks[i]))

    f.close()
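
    if recovery:
        print 'Recovering deleted files...'
        # Assumption: DataRecovery can work from the parsed transaction blocks;
        # the earlier revision below passed deduplicated records instead.
        Analyzer.DataRecovery(disk,tranBlocks,volumeHeader.blockSize)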
    '''
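    Previous revision of main(), kept for reference: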
def main():

    Utility.DirectoryCleaning('transaction')
    Utility.DirectoryCleaning('result')

    if raw_input('Is disk connected? [y/n]: ').lower() == 'y':
        temp = Collector.VolumeFinder()
        disk = temp[0]
        select = temp[1]
        recovery = temp[2]

        print 'Collecting files...'
        temp = Collector.VolumeHeaderParser(
            Utility.DiskDump(disk, 'HFSPlusVolumeHeader', 1024, 1, 1, select))
        blockSize = temp[0]
        journalInfoBlock = temp[1]
        specialFileInfo = temp[2]

        AllocationFile = Collector.SpecialFileExtractor(
            'AllocationFile', specialFileInfo[0], disk, blockSize, select)
        ExtentsOverflowFile = Collector.SpecialFileExtractor(
            'ExtentsOverflowFile', specialFileInfo[1], disk, blockSize, select)
        CatalogFile = Collector.SpecialFileExtractor('CatalogFile',
                                                     specialFileInfo[2], disk,
                                                     blockSize, select)
        Journal = Collector.JournalExtractor(journalInfoBlock, disk, blockSize,
                                             select)

    else:

        path = raw_input('Input path of volume header\n')
        f = open(path, 'rb')

        temp = Collector.VolumeHeaderParser(f.read())
        blockSize = temp[0]
        journalInfoBlock = temp[1]
        specialFileInfo = temp[2]

        f.close()

        path = raw_input('Input path of journal\n')
        f = open(path, 'rb')
        Journal = f.read()
        f.close()

        if raw_input('Input path of catalog file(y/n)\n') == 'y':
            path = input()
            f = open(path, 'rb')
            CatalogFile = f.read()
            f.close()
        """
        if raw_input('Input path of allocation file(y/n)') == 'y':
        path=raw_input()
        f=open(path,'rb')
        AllocationFile=f.read()
        f.close()
        
        if raw_input('Input path of extents overflow file(y/n)\n') == 'y':
        path=raw_input()
        f=open(path,'rb')
        ExtentsOverflowFile=f.read()
        f.close()
        """
        recovery = False

    print 'Analyzing journal...'
    temp = Analyzer.JournalParser(Journal)
    sectorSize = temp[0]
    transaction = temp[1]

    print 'Printing transactions...'
    for i in range(1, transaction[0] + 1):
        for j in range(1, transaction[i][0] + 1):
            fName = './transaction/transaction' + str(i) + '_' + str(
                j) + '(sector' + hex(transaction[i][j][0]) + ')'
            f = open(fName, 'wb')
            f.write(transaction[i][j][1])
            f.close()

    print 'Analyzing CatalogFile...'
    nameAndParent = Analyzer.CatalogFileAnalyzer(CatalogFile)

    print 'Analyzing transactions...'
    Analyzer.TransactionAnalyzer(transaction, specialFileInfo[0],
                                 specialFileInfo[2], nameAndParent, sectorSize,
                                 blockSize)

    print 'Printing record list...'
    fName = './result/recordList.csv'
    f = open(fName, 'w')
    for i in range(1, transaction[0] + 1):
        for j in range(1, transaction[i][0] + 1):

            if transaction[i][j][1][0] == 'c':

                if transaction[i][j][1][1] == 'l' or transaction[i][j][1][
                        1] == 'i':

                    for k in transaction[i][j][2].keys():

                        f.write(
                            'transaction{0}_{1} record{2}\n'.format(i, j, k) +
                            ',')

                        for l in transaction[i][j][2][k].keys():

                            if l == 'nodeName':
                                f.write('{0} : {1}\n'.format(
                                    l, transaction[i][j][2][k][l].encode(
                                        'utf-8')) + ',')

                            else:
                                f.write('{0} : {1}\n'.format(
                                    l, transaction[i][j][2][k][l]) + ',')

                        f.write('\n')

                elif transaction[i][j][1][1] == 'h':

                    f.write('transaction{0}_{1}\n'.format(i, j) + ',')

                    for k in transaction[i][j][2].keys():
                        f.write(
                            '{0} : {1}\n'.format(k, transaction[i][j][2][k]) +
                            ',')

                    f.write('\n')

    f.close()

    print 'Deduplicating records...'
    deduplicatedRecord = Analyzer.RecordDeduplication(transaction)

    print 'Printing deduplicated records...'
    keyForType = ['recordType']
    keyForString = ['nodeName', 'fullPath']
    keyForID = ['parentID', 'CNID', 'ownerID', 'groupID']
    keyForDate = [
        'createDate', 'contentModDate', 'attributeModDate', 'accessDate'
    ]
    keyForFork = ['dataFork', 'resourceFork']

    fName = './result/deduplicatedRecordList.csv'
    f = open(fName, 'w')

    for i in keyForType:
        f.write('transaction' + ',' + i + ',')

    for i in keyForString:
        f.write(i + ',')

    for i in keyForID:
        f.write(i + ',')

    for i in keyForDate:
        f.write(i + ',')

    f.write('AllocatedFork' + ',')

    f.write('\n')

    for i in range(1, deduplicatedRecord[0][0] + 1):
        f.write(',')

        f.write('\ntransaction{0}\n'.format(i) + ',')

        for j in range(1, deduplicatedRecord[0][i][0] + 1):

            f.write('\ntransaction{0}_{1}\n'.format(i, j))

            for k in range(1, deduplicatedRecord[0][i][j][0] + 1):

                if deduplicatedRecord[0][i][j][k]['recordType'] < 3:

                    for l in keyForType:
                        if deduplicatedRecord[0][i][j][k][l] == 1:
                            f.write(',')
                            f.write('folder')
                        else:
                            f.write(',')
                            f.write('file')

                    for l in keyForString:
                        f.write(',')
                        f.write(
                            deduplicatedRecord[0][i][j][k][l].encode('utf-8'))
                    f.write(',')

                    for l in keyForID:
                        f.write(hex(deduplicatedRecord[0][i][j][k][l])[2:])
                    f.write(',')

                    for l in keyForDate:
                        f.write((datetime.datetime(1904, 1, 1) +
                                 datetime.timedelta(
                                     seconds=deduplicatedRecord[0][i][j][k][l])
                                 ).isoformat(' ') + ',')

                    if deduplicatedRecord[0][i][j][k]['recordType'] == 2:

                        duplicated = ''
                        allocatedFork = 0
                        for l in keyForFork:
                            for m in range(
                                    1,
                                    deduplicatedRecord[0][i][j][k][l][0] + 1):
                                if deduplicatedRecord[0][i][j][k][l][m][
                                        0] == 1:
                                    allocatedFork += 1
                                elif deduplicatedRecord[0][i][j][k][l][m][
                                        0] == -1:
                                    duplicated = '(duplicated)'

                        f.write('{0}/{1}{2}'.format(
                            allocatedFork,
                            deduplicatedRecord[0][i][j][k]['dataFork'][0] +
                            deduplicatedRecord[0][i][j][k]['resourceFork'][0],
                            duplicated) + ',')

                    f.write('\n')

    f.close()

    if recovery:
        print 'Recovering deleted files...'
        Analyzer.DataRecovery(disk, deduplicatedRecord[2], blockSize)