tools v6.0.8
parent 74a4c894cb
commit b1feca321d
@@ -24,19 +24,19 @@
 	<key>CFBundleExecutable</key>
 	<string>droplet</string>
 	<key>CFBundleGetInfoString</key>
-	<string>DeDRM AppleScript 6.0.7. Written 2010–2013 by Apprentice Alf and others.</string>
+	<string>DeDRM AppleScript 6.0.8. Written 2010–2013 by Apprentice Alf and others.</string>
 	<key>CFBundleIconFile</key>
 	<string>DeDRM</string>
 	<key>CFBundleIdentifier</key>
 	<string>com.apple.ScriptEditor.id.707CCCD5-0C6C-4BEB-B67C-B6E866ADE85A</string>
 	<key>CFBundleInfoDictionaryVersion</key>
-	<string>6.0.7</string>
+	<string>6.0.8</string>
 	<key>CFBundleName</key>
 	<string>DeDRM</string>
 	<key>CFBundlePackageType</key>
 	<string>APPL</string>
 	<key>CFBundleShortVersionString</key>
-	<string>6.0.7</string>
+	<string>6.0.8</string>
 	<key>CFBundleSignature</key>
 	<string>dplt</string>
 	<key>LSRequiresCarbon</key>
@@ -31,14 +31,17 @@ __docformat__ = 'restructuredtext en'
 #   6.0.3 - Fixes for Kindle for Mac and Windows non-ascii user names
 #   6.0.4 - Fixes for stand-alone scripts and applications
 #           and pdb files in plugin and initial conversion of prefs.
+#   6.0.5 - Fix a key issue
 #   6.0.6 - Fix up an incorrect function call
+#   6.0.7 - Error handling for incomplete PDF metadata
+#   6.0.8 - Fixes a Wine key issue and topaz support
 
 """
 Decrypt DRMed ebooks.
 """
 
 PLUGIN_NAME = u"DeDRM"
-PLUGIN_VERSION_TUPLE = (6, 0, 7)
+PLUGIN_VERSION_TUPLE = (6, 0, 8)
 PLUGIN_VERSION = u".".join([unicode(str(x)) for x in PLUGIN_VERSION_TUPLE])
 # Include an html helpfile in the plugin's zipfile with the following name.
 RESOURCE_NAME = PLUGIN_NAME + '_Help.htm'
@@ -313,7 +316,7 @@ class DeDRM(FileTypePlugin):
                     from wineutils import WineGetKeys
 
                     scriptpath = os.path.join(self.alfdir,u"adobekey.py")
-                    defaultkeys = self.WineGetKeys(scriptpath, u".der",dedrmprefs['adobewineprefix'])
+                    defaultkeys = WineGetKeys(scriptpath, u".der",dedrmprefs['adobewineprefix'])
                 except:
                     pass
 
@@ -391,7 +394,7 @@ class DeDRM(FileTypePlugin):
                     from wineutils import WineGetKeys
 
                     scriptpath = os.path.join(self.alfdir,u"kindlekey.py")
-                    defaultkeys = self.WineGetKeys(scriptpath, u".k4i",dedrmprefs['kindlewineprefix'])
+                    defaultkeys = WineGetKeys(scriptpath, u".k4i",dedrmprefs['kindlewineprefix'])
                 except:
                     pass
 
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+#fileencoding: utf-8
+
+import os
+import sys
+import zlib
+import tarfile
+from hashlib import md5
+from cStringIO import StringIO
+from binascii import a2b_hex, b2a_hex
+
+STORAGE = 'AmazonSecureStorage.xml'
+
+class AndroidObfuscation(object):
+    '''AndroidObfuscation
+    For the key, it's written in java, and run in android dalvikvm
+    '''
+
+    key = a2b_hex('0176e04c9408b1702d90be333fd53523')
+
+    def encrypt(self, plaintext):
+        cipher = self._get_cipher()
+        padding = len(self.key) - len(plaintext) % len(self.key)
+        plaintext += chr(padding) * padding
+        return b2a_hex(cipher.encrypt(plaintext))
+
+    def decrypt(self, ciphertext):
+        cipher = self._get_cipher()
+        plaintext = cipher.decrypt(a2b_hex(ciphertext))
+        return plaintext[:-ord(plaintext[-1])]
+
+    def _get_cipher(self):
+        try:
+            from Crypto.Cipher import AES
+            return AES.new(self.key)
+        except ImportError:
+            from aescbc import AES, noPadding
+            return AES(self.key, padding=noPadding())
+
+class AndroidObfuscationV2(AndroidObfuscation):
+    '''AndroidObfuscationV2
+    '''
+
+    count = 503
+    password = 'Thomsun was here!'
+
+    def __init__(self, salt):
+        key = self.password + salt
+        for _ in range(self.count):
+            key = md5(key).digest()
+        self.key = key[:8]
+        self.iv = key[8:16]
+
+    def _get_cipher(self):
+        try :
+            from Crypto.Cipher import DES
+            return DES.new(self.key, DES.MODE_CBC, self.iv)
+        except ImportError:
+            from python_des import Des, CBC
+            return Des(self.key, CBC, self.iv)
+
+def parse_preference(path):
+    ''' parse android's shared preference xml '''
+    storage = {}
+    read = open(path)
+    for line in read:
+        line = line.strip()
+        # <string name="key">value</string>
+        if line.startswith('<string name="'):
+            index = line.find('"', 14)
+            key = line[14:index]
+            value = line[index+2:-9]
+            storage[key] = value
+    read.close()
+    return storage
+
+def get_serials(path=None):
+    ''' get serials from android's shared preference xml '''
+    if path is None:
+        if not os.path.isfile(STORAGE):
+            if os.path.isfile("backup.ab"):
+                get_storage()
+            else:
+                return []
+        path = STORAGE
+
+    if not os.path.isfile(path):
+        return []
+
+    storage = parse_preference(path)
+    salt = storage.get('AmazonSaltKey')
+    if salt and len(salt) == 16:
+        sys.stdout.write('Using AndroidObfuscationV2\n')
+        obfuscation = AndroidObfuscationV2(a2b_hex(salt))
+    else:
+        sys.stdout.write('Using AndroidObfuscation\n')
+        obfuscation = AndroidObfuscation()
+
+    def get_value(key):
+        encrypted_key = obfuscation.encrypt(key)
+        encrypted_value = storage.get(encrypted_key)
+        if encrypted_value:
+            return obfuscation.decrypt(encrypted_value)
+        return ''
+
+    # also see getK4Pids in kgenpids.py
+    try:
+        dsnid = get_value('DsnId')
+    except:
+        sys.stderr.write('cannot get DsnId\n')
+        return []
+
+    try:
+        tokens = set(get_value('kindle.account.tokens').split(','))
+    except:
+        return [dsnid]
+
+    serials = []
+    for token in tokens:
+        if token:
+            serials.append('%s%s' % (dsnid, token))
+    serials.append(dsnid)
+    for token in tokens:
+        if token:
+            serials.append(token)
+    return serials
+
+def get_storage(path='backup.ab'):
+    '''get AmazonSecureStorage.xml from android backup.ab
+    backup.ab can be get using adb command:
+    shell> adb backup com.amazon.kindle
+    '''
+    output = None
+    read = open(path, 'rb')
+    head = read.read(24)
+    if head == 'ANDROID BACKUP\n1\n1\nnone\n':
+        output = StringIO(zlib.decompress(read.read()))
+    read.close()
+
+    if not output:
+        return False
+
+    tar = tarfile.open(fileobj=output)
+    for member in tar.getmembers():
+        if member.name.strip().endswith(STORAGE):
+            write = open(STORAGE, 'w')
+            write.write(tar.extractfile(member).read())
+            write.close()
+            break
+
+    return True
+
+__all__ = [ 'get_storage', 'get_serials', 'parse_preference',
+    'AndroidObfuscation', 'AndroidObfuscationV2', 'STORAGE']
+
+if __name__ == '__main__':
+    print get_serials()
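
The new android.py rolls its own block padding before handing text to the cipher. A quick round trip shows the scheme (an illustrative sketch, not part of the commit; it assumes PyCrypto's Crypto.Cipher or the bundled aescbc fallback is importable):

    # Python 2. AndroidObfuscation pads with chr(padlen) * padlen, then
    # AES-encrypts and hex-encodes; decrypt() reverses both steps.
    from android import AndroidObfuscation

    obf = AndroidObfuscation()
    token = obf.encrypt('kindle.account.tokens')
    assert obf.decrypt(token) == 'kindle.account.tokens'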
@@ -0,0 +1,6 @@
+1.1 get AmazonSecureStorage.xml from /data/data/com.amazon.kindle/shared_prefs/AmazonSecureStorage.xml
+
+1.2 on android 4.0+, run `adb backup com.amazon.kindle` from PC will get backup.ab
+    now android.py can convert backup.ab to AmazonSecureStorage.xml
+
+2. run `k4mobidedrm.py -a AmazonSecureStorage.xml <infile> <outdir>'
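
The six-line note above compresses a three-step workflow; the same flow can be driven from Python (an illustrative sketch, not part of the commit; adb must be installed and the backup confirmed on the device):

    # Python 2 sketch of the documented steps.
    import subprocess
    import android

    # 1.2: pull backup.ab from an Android 4.0+ device.
    subprocess.check_call(['adb', 'backup', 'com.amazon.kindle'])
    # convert backup.ab into AmazonSecureStorage.xml in the current directory
    android.get_storage('backup.ab')
    # 2: k4mobidedrm.py -a makes the equivalent of this call internally
    print android.get_serials('AmazonSecureStorage.xml')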
@@ -458,7 +458,11 @@ class DocParser(object):
             (wtype, num) = pdesc[j]
 
             if wtype == 'ocr' :
-                word = self.ocrtext[num]
+                try:
+                    word = self.ocrtext[num]
+                except:
+                    word = ""
+
                 sep = ' '
 
                 if handle_links:
@@ -80,10 +80,12 @@ if inCalibre:
     from calibre_plugins.dedrm import mobidedrm
     from calibre_plugins.dedrm import topazextract
     from calibre_plugins.dedrm import kgenpids
+    from calibre_plugins.dedrm import android
 else:
     import mobidedrm
     import topazextract
     import kgenpids
+    import android
 
 # Wrap a stream so that output gets flushed immediately
 # and also make sure that any unicode strings get
@@ -273,7 +275,7 @@ def decryptBook(infile, outdir, kDatabaseFiles, serials, pids):
 def usage(progname):
     print u"Removes DRM protection from Mobipocket, Amazon KF8, Amazon Print Replica and Amazon Topaz ebooks"
     print u"Usage:"
-    print u"  {0} [-k <kindle.k4i>] [-p <comma separated PIDs>] [-s <comma separated Kindle serial numbers>] <infile> <outdir>".format(progname)
+    print u"  {0} [-k <kindle.k4i>] [-p <comma separated PIDs>] [-s <comma separated Kindle serial numbers>] [ -a <AmazonSecureStorage.xml> ] <infile> <outdir>".format(progname)
 
 #
 # Main
@@ -284,7 +286,7 @@ def cli_main():
     print u"K4MobiDeDrm v{0}.\nCopyright © 2008-2013 The Dark Reverser et al.".format(__version__)
 
     try:
-        opts, args = getopt.getopt(argv[1:], "k:p:s:")
+        opts, args = getopt.getopt(argv[1:], "k:p:s:a:")
     except getopt.GetoptError, err:
         print u"Error in options or arguments: {0}".format(err.args[0])
         usage(progname)
@@ -312,6 +314,11 @@ def cli_main():
             if a == None :
                 raise DrmException("Invalid parameter for -s")
             serials = a.split(',')
+        if o == '-a':
+            if a == None:
+                continue
+            serials.extend(android.get_serials(a))
+            serials.extend(android.get_serials())
 
     # try with built in Kindle Info files if not on Linux
     k4 = not sys.platform.startswith('linux')
@@ -19,6 +19,7 @@ from __future__ import with_statement
 #  1.6 - Fixed a problem getting the disk serial numbers
 #  1.7 - Work if TkInter is missing
 #  1.8 - Fixes for Kindle for Mac, and non-ascii in Windows user names
+#  1.9 - Fixes for Unicode in Windows user names
 
 
 """
@@ -26,7 +27,7 @@ Retrieve Kindle for PC/Mac user key.
 """
 
 __license__ = 'GPL v3'
-__version__ = '1.8'
+__version__ = '1.9'
 
 import sys, os, re
 from struct import pack, unpack, unpack_from
@@ -907,18 +908,34 @@ if iswindows:
             return CryptUnprotectData
         CryptUnprotectData = CryptUnprotectData()
 
+    # Returns Environmental Variables that contain unicode
+    def getEnvironmentVariable(name):
+        import ctypes
+        name = unicode(name) # make sure string argument is unicode
+        n = ctypes.windll.kernel32.GetEnvironmentVariableW(name, None, 0)
+        if n == 0:
+            return None
+        buf = ctypes.create_unicode_buffer(u'\0'*n)
+        ctypes.windll.kernel32.GetEnvironmentVariableW(name, buf, n)
+        return buf.value
+
     # Locate all of the kindle-info style files and return as list
     def getKindleInfoFiles():
         kInfoFiles = []
         # some 64 bit machines do not have the proper registry key for some reason
-        # or the pythonn interface to the 32 vs 64 bit registry is broken
+        # or the python interface to the 32 vs 64 bit registry is broken
         path = ""
         if 'LOCALAPPDATA' in os.environ.keys():
-            path = os.environ['LOCALAPPDATA']
+            # Python 2.x does not return unicode env. Use Python 3.x
+            path = winreg.ExpandEnvironmentStrings(u"%LOCALAPPDATA%")
+            # this is just another alternative.
+            # path = getEnvironmentVariable('LOCALAPPDATA')
+            if not os.path.isdir(path):
+                path = ""
         else:
             # User Shell Folders show take precedent over Shell Folders if present
             try:
+                # this will still break
                 regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\User Shell Folders\\")
                 path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
                 if not os.path.isdir(path):
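
The helper added above uses the usual two-call Win32 pattern: the first GetEnvironmentVariableW call reports the required buffer length (including the terminating NUL), and the second call fills a buffer of that length. A small comparison, assuming Python 2 on Windows (illustrative only):

    # os.environ goes through the ANSI API on Python 2, so non-ASCII
    # characters in the path can come back mangled; the ctypes helper
    # returns a real unicode object instead.
    import os
    print repr(os.environ.get('LOCALAPPDATA'))
    print repr(getEnvironmentVariable('LOCALAPPDATA'))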
@@ -937,13 +954,14 @@ if iswindows:
         if path == "":
             print ('Could not find the folder in which to look for kinfoFiles.')
         else:
-            print('searching for kinfoFiles in ' + path)
+            # Probably not the best. To Fix (shouldn't ignore in encoding) or use utf-8
+            print(u'searching for kinfoFiles in ' + path.encode('ascii', 'ignore'))
 
         # look for (K4PC 1.9.0 and later) .kinf2011 file
         kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
         if os.path.isfile(kinfopath):
             found = True
-            print('Found K4PC 1.9+ kinf2011 file: ' + kinfopath)
+            print('Found K4PC 1.9+ kinf2011 file: ' + kinfopath.encode('ascii','ignore'))
             kInfoFiles.append(kinfopath)
 
         # look for (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
@@ -1142,7 +1160,7 @@ if iswindows:
                 cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
                 DB[keyname] = cleartext
 
-            if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB:
+            if 'kindle.account.tokens' in DB:
                 print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(GetIDString(), GetUserName().decode("latin-1"))
                 # store values used in decryption
                 DB['IDString'] = GetIDString()
@@ -1758,7 +1776,7 @@ elif isosx:
                         break
                 except:
                     pass
-        if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB:
+        if 'kindle.account.tokens' in DB:
             # store values used in decryption
             print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(IDString, GetUserName())
             DB['IDString'] = IDString
@@ -156,6 +156,8 @@ def PC1(key, src, decryption=True):
         return Pukall_Cipher().PC1(key,src,decryption)
     except NameError:
         pass
+    except TypeError:
+        pass
 
     # use slow python version, since Pukall_Cipher didn't load
     sum1 = 0;
@@ -178,7 +178,12 @@ class DocParser(object):
                     if val == "":
                         val = 0
 
-                    if not ((attr == 'hang') and (int(val) == 0)) :
+                    if not ((attr == 'hang') and (int(val) == 0)):
+                        try:
+                            f = float(val)
+                        except:
+                            print "Warning: unrecognised val, ignoring"
+                            val = 0
                         pv = float(val)/scale
                         cssargs[attr] = (self.attr_val_map[attr], pv)
                         keep = True
@@ -356,7 +356,7 @@ class TopazBook:
 
         self.setBookKey(bookKey)
         self.createBookDirectory()
         self.extractFiles()
         print u"Successfully Extracted Topaz contents"
         if inCalibre:
             from calibre_plugins.dedrm import genbook
@@ -12,7 +12,7 @@
 #  6.0.4 - Fix for other potential unicode problems
 #  6.0.5 - Fix typo
 
-__version__ = '6.0.7'
+__version__ = '6.0.8'
 
 import sys
 import os, os.path
Binary file not shown.
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# base64.py, version 1.0
-# Copyright © 2010 Apprentice Alf
-
-# Released under the terms of the GNU General Public Licence, version 3 or
-# later.  <http://www.gnu.org/licenses/>
-
-# Revision history:
-#   1 - Initial release. To allow Applescript to do base64 encoding
-
-"""
-Provide base64 encoding.
-"""
-
-from __future__ import with_statement
-
-__license__ = 'GPL v3'
-
-import sys
-import os
-import base64
-
-def usage(progname):
-    print "Applies base64 encoding to the supplied file, sending to standard output"
-    print "Usage:"
-    print "    %s <infile>" % progname
-
-def cli_main(argv=sys.argv):
-    progname = os.path.basename(argv[0])
-
-    if len(argv)<2:
-        usage(progname)
-        sys.exit(2)
-
-    keypath = argv[1]
-    with open(keypath, 'rb') as f:
-        keyder = f.read()
-    print keyder.encode('base64')
-    return 0
-
-
-if __name__ == '__main__':
-    sys.exit(cli_main())
@ -1,208 +1,45 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
# -*- coding: utf-8 -*-
|
||||||
# This is a python script. You need a Python interpreter to run it.
|
|
||||||
# For example, ActiveState Python, which exists for windows.
|
# base64.py, version 1.0
|
||||||
#
|
# Copyright © 2010 Apprentice Alf
|
||||||
# Changelog drmcheck
|
|
||||||
# 1.00 - Initial version, with code from various other scripts
|
# Released under the terms of the GNU General Public Licence, version 3 or
|
||||||
# 1.01 - Moved authorship announcement to usage section.
|
# later. <http://www.gnu.org/licenses/>
|
||||||
#
|
|
||||||
# Changelog epubtest
|
# Revision history:
|
||||||
# 1.00 - Cut to epubtest.py, testing ePub files only by Apprentice Alf
|
# 1 - Initial release. To allow Applescript to do base64 encoding
|
||||||
# 1.01 - Added routine for use by Windows DeDRM
|
|
||||||
#
|
"""
|
||||||
# Written in 2011 by Paul Durrant
|
Provide base64 encoding.
|
||||||
# Released with unlicense. See http://unlicense.org/
|
"""
|
||||||
#
|
|
||||||
#############################################################################
|
|
||||||
#
|
|
||||||
# This is free and unencumbered software released into the public domain.
|
|
||||||
#
|
|
||||||
# Anyone is free to copy, modify, publish, use, compile, sell, or
|
|
||||||
# distribute this software, either in source code form or as a compiled
|
|
||||||
# binary, for any purpose, commercial or non-commercial, and by any
|
|
||||||
# means.
|
|
||||||
#
|
|
||||||
# In jurisdictions that recognize copyright laws, the author or authors
|
|
||||||
# of this software dedicate any and all copyright interest in the
|
|
||||||
# software to the public domain. We make this dedication for the benefit
|
|
||||||
# of the public at large and to the detriment of our heirs and
|
|
||||||
# successors. We intend this dedication to be an overt act of
|
|
||||||
# relinquishment in perpetuity of all present and future rights to this
|
|
||||||
# software under copyright law.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
||||||
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
||||||
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
||||||
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
# OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
#
|
|
||||||
#############################################################################
|
|
||||||
#
|
|
||||||
# It's still polite to give attribution if you do reuse this code.
|
|
||||||
#
|
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
__version__ = '1.01'
|
__license__ = 'GPL v3'
|
||||||
|
|
||||||
import sys, struct, os
|
import sys
|
||||||
import zlib
|
import os
|
||||||
import zipfile
|
import base64
|
||||||
import xml.etree.ElementTree as etree
|
|
||||||
|
|
||||||
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
def usage(progname):
|
||||||
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
print "Applies base64 encoding to the supplied file, sending to standard output"
|
||||||
|
print "Usage:"
|
||||||
|
print " %s <infile>" % progname
|
||||||
|
|
||||||
# Wrap a stream so that output gets flushed immediately
|
def cli_main(argv=sys.argv):
|
||||||
# and also make sure that any unicode strings get
|
progname = os.path.basename(argv[0])
|
||||||
# encoded using "replace" before writing them.
|
|
||||||
class SafeUnbuffered:
|
|
||||||
def __init__(self, stream):
|
|
||||||
self.stream = stream
|
|
||||||
self.encoding = stream.encoding
|
|
||||||
if self.encoding == None:
|
|
||||||
self.encoding = "utf-8"
|
|
||||||
def write(self, data):
|
|
||||||
if isinstance(data,unicode):
|
|
||||||
data = data.encode(self.encoding,"replace")
|
|
||||||
self.stream.write(data)
|
|
||||||
self.stream.flush()
|
|
||||||
def __getattr__(self, attr):
|
|
||||||
return getattr(self.stream, attr)
|
|
||||||
|
|
||||||
try:
|
if len(argv)<2:
|
||||||
from calibre.constants import iswindows, isosx
|
usage(progname)
|
||||||
except:
|
sys.exit(2)
|
||||||
iswindows = sys.platform.startswith('win')
|
|
||||||
isosx = sys.platform.startswith('darwin')
|
|
||||||
|
|
||||||
def unicode_argv():
|
keypath = argv[1]
|
||||||
if iswindows:
|
with open(keypath, 'rb') as f:
|
||||||
# Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
|
keyder = f.read()
|
||||||
# strings.
|
print keyder.encode('base64')
|
||||||
|
|
||||||
# Versions 2.x of Python don't support Unicode in sys.argv on
|
|
||||||
# Windows, with the underlying Windows API instead replacing multi-byte
|
|
||||||
# characters with '?'. So use shell32.GetCommandLineArgvW to get sys.argv
|
|
||||||
# as a list of Unicode strings and encode them as utf-8
|
|
||||||
|
|
||||||
from ctypes import POINTER, byref, cdll, c_int, windll
|
|
||||||
from ctypes.wintypes import LPCWSTR, LPWSTR
|
|
||||||
|
|
||||||
GetCommandLineW = cdll.kernel32.GetCommandLineW
|
|
||||||
GetCommandLineW.argtypes = []
|
|
||||||
GetCommandLineW.restype = LPCWSTR
|
|
||||||
|
|
||||||
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
|
|
||||||
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
|
|
||||||
CommandLineToArgvW.restype = POINTER(LPWSTR)
|
|
||||||
|
|
||||||
cmd = GetCommandLineW()
|
|
||||||
argc = c_int(0)
|
|
||||||
argv = CommandLineToArgvW(cmd, byref(argc))
|
|
||||||
if argc.value > 0:
|
|
||||||
# Remove Python executable and commands if present
|
|
||||||
start = argc.value - len(sys.argv)
|
|
||||||
return [argv[i] for i in
|
|
||||||
xrange(start, argc.value)]
|
|
||||||
# if we don't have any arguments at all, just pass back script name
|
|
||||||
# this should never happen
|
|
||||||
return [u"epubtest.py"]
|
|
||||||
else:
|
|
||||||
argvencoding = sys.stdin.encoding
|
|
||||||
if argvencoding == None:
|
|
||||||
argvencoding = "utf-8"
|
|
||||||
return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]
|
|
||||||
|
|
||||||
_FILENAME_LEN_OFFSET = 26
|
|
||||||
_EXTRA_LEN_OFFSET = 28
|
|
||||||
_FILENAME_OFFSET = 30
|
|
||||||
_MAX_SIZE = 64 * 1024
|
|
||||||
|
|
||||||
|
|
||||||
def uncompress(cmpdata):
|
|
||||||
dc = zlib.decompressobj(-15)
|
|
||||||
data = ''
|
|
||||||
while len(cmpdata) > 0:
|
|
||||||
if len(cmpdata) > _MAX_SIZE :
|
|
||||||
newdata = cmpdata[0:_MAX_SIZE]
|
|
||||||
cmpdata = cmpdata[_MAX_SIZE:]
|
|
||||||
else:
|
|
||||||
newdata = cmpdata
|
|
||||||
cmpdata = ''
|
|
||||||
newdata = dc.decompress(newdata)
|
|
||||||
unprocessed = dc.unconsumed_tail
|
|
||||||
if len(unprocessed) == 0:
|
|
||||||
newdata += dc.flush()
|
|
||||||
data += newdata
|
|
||||||
cmpdata += unprocessed
|
|
||||||
unprocessed = ''
|
|
||||||
return data
|
|
||||||
|
|
||||||
def getfiledata(file, zi):
    # get file name length and extra data length to find start of file data
    local_header_offset = zi.header_offset

    file.seek(local_header_offset + _FILENAME_LEN_OFFSET)
    leninfo = file.read(2)
    local_name_length, = struct.unpack('<H', leninfo)

    file.seek(local_header_offset + _EXTRA_LEN_OFFSET)
    exinfo = file.read(2)
    extra_field_length, = struct.unpack('<H', exinfo)

    file.seek(local_header_offset + _FILENAME_OFFSET + local_name_length + extra_field_length)
    data = None

    # if not compressed we are good to go
    if zi.compress_type == zipfile.ZIP_STORED:
        data = file.read(zi.file_size)

    # if compressed we must decompress it using zlib
    if zi.compress_type == zipfile.ZIP_DEFLATED:
        cmpdata = file.read(zi.compress_size)
        data = uncompress(cmpdata)

    return data
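# Illustrative aside (not part of the commit): the offset constants mirror
# the ZIP local file header layout (PKWARE APPNOTE): name length is the
# little-endian uint16 at byte 26, extra-field length at byte 28, and the
# fixed header is 30 bytes long, so the member's data starts at
# 30 + name_length + extra_length. Given the 30 header bytes (hypothetical
# argument):
import struct

def zip_data_start(local_header, header_offset):
    name_len, extra_len = struct.unpack('<HH', local_header[26:30])
    return header_offset + 30 + name_len + extra_len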
def encryption(infile):
    # returns encryption: one of Unencrypted, Adobe, B&N and Unknown
    encryption = "Unknown"
    try:
        with open(infile,'rb') as infileobject:
            bookdata = infileobject.read(58)
            # Check for Zip
            if bookdata[0:0+2] == "PK":
                foundrights = False
                foundencryption = False
                inzip = zipfile.ZipFile(infile,'r')
                namelist = set(inzip.namelist())
                if 'META-INF/rights.xml' not in namelist or 'META-INF/encryption.xml' not in namelist:
                    encryption = "Unencrypted"
                else:
                    rights = etree.fromstring(inzip.read('META-INF/rights.xml'))
                    adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
                    expr = './/%s' % (adept('encryptedKey'),)
                    bookkey = ''.join(rights.findtext(expr))
                    if len(bookkey) == 172:
                        encryption = "Adobe"
                    elif len(bookkey) == 64:
                        encryption = "B&N"
                    else:
                        encryption = "Unknown"
    except:
        traceback.print_exc()
    return encryption

def main():
    argv=unicode_argv()
    print encryption(argv[1])
    return 0

if __name__ == "__main__":
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(main())

keypath = argv[1]
with open(keypath, 'rb') as f:
    keyder = f.read()
print keyder.encode('base64')
return 0

if __name__ == '__main__':
    sys.exit(cli_main())
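# Illustrative aside (not part of the commit): the 172/64 test in
# encryption() works because rights.xml stores the book key base64-encoded;
# 128 RSA-encrypted key bytes encode to 172 base64 characters (Adobe ADEPT),
# while Barnes & Noble keys are much shorter. A standalone version of the
# same probe (the path argument is caller-supplied):
import zipfile
import xml.etree.ElementTree as etree

ADEPT_NS = '{http://ns.adobe.com/adept}'

def adept_keylen(path):
    z = zipfile.ZipFile(path, 'r')
    if 'META-INF/rights.xml' not in z.namelist():
        return 0
    rights = etree.fromstring(z.read('META-INF/rights.xml'))
    return len(rights.findtext('.//%sencryptedKey' % ADEPT_NS) or '')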
@@ -1,82 +1,60 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# erdr2pml.py
# Copyright © 2008 The Dark Reverser
#
# Modified 2008–2012 by some_updates, DiapDealer and Apprentice Alf

# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
# Changelog
#
#  Based on ereader2html version 0.08 plus some later small fixes
#
#  0.01 - Initial version
#  0.02 - Support more eReader files. Support bold text and links. Fix PML decoder parsing bug.
#  0.03 - Fix incorrect variable usage at one place.
#  0.03b - enhancement by DeBockle (version 259 support)
# Custom version 0.03 - no change to eReader support, only usability changes
#  - start of pep-8 indentation (spaces not tab), fix trailing blanks
#  - version variable, only one place to change
#  - added main routine, now callable as a library/module,
#    means tools can add optional support for ereader2html
#  - outdir is no longer a mandatory parameter (defaults based on input name if missing)
#  - time taken output to stdout
#  - Psyco support - reduces runtime by a factor of (over) 3!
#    E.g. (~600Kb file) 90 secs down to 24 secs
#  - newstyle classes
#  - changed map call to list comprehension
#    may not work with python 2.3
#    without Psyco this reduces runtime to 90%
#    E.g. 90 secs down to 77 secs
#    Psyco with map calls takes longer, do not run with map in Psyco JIT!
#  - izip calls used instead of zip (if available), further reduction
#    in run time (factor of 4.5).
#    E.g. (~600Kb file) 90 secs down to 20 secs
#  - Python 2.6+ support, avoid DeprecationWarning with sha/sha1
#  0.04 - Footnote support, PML output, correct charset in html, support more PML tags
#  - Feature change, dump out PML file
#  - Added support for footnote tags. NOTE footnote ids appear to be bad (not usable)
#    in some pdb files :-( due to the same id being used multiple times
#  - Added correct charset encoding (pml is based on cp1252)
#  - Added logging support.
#  0.05 - Improved type 272 support for sidebars, links, chapters, metainfo, etc
#  0.06 - Merge of 0.04 and 0.05. Improved HTML output
#         Placed images in subfolder, so that it's possible to just
#         drop the book.pml file onto DropBook to make an unencrypted
#         copy of the eReader file.
#         Using that with Calibre works a lot better than the HTML
#         conversion in this code.
#  0.07 - Further Improved type 272 support for sidebars with all earlier fixes
#  0.08 - fixed typos, removed extraneous things
#  0.09 - fixed typos in first_pages to first_page to again support older formats
#  0.10 - minor cleanups
#  0.11 - fixups for using correct xml for footnotes and sidebars for use with Dropbook
#  0.12 - Fix added to prevent lowercasing of image names when the pml code itself uses a different case in the link name.
#  0.13 - change to unbuffered stdout for use with gui front ends
#  0.14 - contributed enhancement to support --make-pmlz switch
#  0.15 - enabled high-ascii to pml character encoding. DropBook now works on Mac.
#  0.16 - convert to use openssl DES (very very fast) or pure python DES if openssl's libcrypto is not available
#  0.17 - added support for pycrypto's DES as well
#  0.18 - on Windows try PyCrypto first and OpenSSL next
#  0.19 - Modify the interface to allow use of import
#  0.20 - modify to allow use inside new interface for calibre plugins
#  0.21 - Support eReader (drm) version 11.
#        - Don't reject dictionary format.
#        - Ignore sidebars for dictionaries (different format?)
#  0.22 - Unicode and plugin support, different image folders for PMLZ and source
#  0.23 - moved unicode_argv call inside main for Windows DeDRM compatibility

__version__='0.23'

import sys, re
import struct, binascii, getopt, zlib, os, os.path, urllib, tempfile, traceback

if 'calibre' in sys.modules:
    inCalibre = True
else:
    inCalibre = False

#!/usr/bin/python
#
# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# Changelog drmcheck
#  1.00 - Initial version, with code from various other scripts
#  1.01 - Moved authorship announcement to usage section.
#
# Changelog epubtest
#  1.00 - Cut to epubtest.py, testing ePub files only by Apprentice Alf
#  1.01 - Added routine for use by Windows DeDRM
#
# Written in 2011 by Paul Durrant
# Released with unlicense. See http://unlicense.org/
#
#############################################################################
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#############################################################################
#
# It's still polite to give attribution if you do reuse this code.
#

from __future__ import with_statement

__version__ = '1.01'

import sys, struct, os
import zlib
import zipfile
import xml.etree.ElementTree as etree

NSMAP = {'adept': 'http://ns.adobe.com/adept',
         'enc': 'http://www.w3.org/2001/04/xmlenc#'}

# Wrap a stream so that output gets flushed immediately
# and also make sure that any unicode strings get
@@ -95,8 +73,11 @@ class SafeUnbuffered:
    def __getattr__(self, attr):
        return getattr(self.stream, attr)

iswindows = sys.platform.startswith('win')
isosx = sys.platform.startswith('darwin')

try:
    from calibre.constants import iswindows, isosx
except:
    iswindows = sys.platform.startswith('win')
    isosx = sys.platform.startswith('darwin')

def unicode_argv():
    if iswindows:
@@ -105,8 +86,8 @@ def unicode_argv():
        # Versions 2.x of Python don't support Unicode in sys.argv on
        # Windows, with the underlying Windows API instead replacing multi-byte
        # characters with '?'.
        # characters with '?'. So use shell32.GetCommandLineArgvW to get sys.argv
        # as a list of Unicode strings and encode them as utf-8

        from ctypes import POINTER, byref, cdll, c_int, windll
        from ctypes.wintypes import LPCWSTR, LPWSTR
@@ -129,469 +110,99 @@ def unicode_argv():
            xrange(start, argc.value)]
        # if we don't have any arguments at all, just pass back script name
        # this should never happen
        return [u"mobidedrm.py"]
        return [u"epubtest.py"]
    else:
        argvencoding = sys.stdin.encoding
        if argvencoding == None:
            argvencoding = "utf-8"
        return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]
Des = None
if iswindows:
    # first try with pycrypto
    if inCalibre:
        from calibre_plugins.dedrm import pycrypto_des
    else:
        import pycrypto_des
    Des = pycrypto_des.load_pycrypto()
    if Des == None:
        # then try with openssl
        if inCalibre:
            from calibre_plugins.dedrm import openssl_des
        else:
            import openssl_des
        Des = openssl_des.load_libcrypto()
else:
    # first try with openssl
    if inCalibre:
        from calibre_plugins.dedrm import openssl_des
    else:
        import openssl_des
    Des = openssl_des.load_libcrypto()
    if Des == None:
        # then try with pycrypto
        if inCalibre:
            from calibre_plugins.dedrm import pycrypto_des
        else:
            import pycrypto_des
        Des = pycrypto_des.load_pycrypto()

# if that did not work then use pure python implementation
# of DES and try to speed it up with Psyco
if Des == None:
    if inCalibre:
        from calibre_plugins.dedrm import python_des
    else:
        import python_des
    Des = python_des.Des

# Import Psyco if available
try:
    # http://psyco.sourceforge.net
    import psyco
    psyco.full()
except ImportError:
    pass

try:
    from hashlib import sha1
except ImportError:
    # older Python release
    import sha
    sha1 = lambda s: sha.new(s)

import cgi
import logging

logging.basicConfig()
#logging.basicConfig(level=logging.DEBUG)

class Sectionizer(object):
    bkType = "Book"
    def __init__(self, filename, ident):
        self.contents = file(filename, 'rb').read()
        self.header = self.contents[0:72]
        self.num_sections, = struct.unpack('>H', self.contents[76:78])
        # Dictionary or normal content (TODO: Not hard-coded)
        if self.header[0x3C:0x3C+8] != ident:
            if self.header[0x3C:0x3C+8] == "PDctPPrs":
                self.bkType = "Dict"
            else:
                raise ValueError('Invalid file format')
        self.sections = []
        for i in xrange(self.num_sections):
            offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.contents[78+i*8:78+i*8+8])
            flags, val = a1, a2<<16|a3<<8|a4
            self.sections.append( (offset, flags, val) )
    def loadSection(self, section):
        if section + 1 == self.num_sections:
            end_off = len(self.contents)
        else:
            end_off = self.sections[section + 1][0]
        off = self.sections[section][0]
        return self.contents[off:end_off]
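# Illustrative aside (not part of the commit): Sectionizer is a minimal Palm
# database (.pdb) reader. Bytes 0x3C-0x43 carry the type/creator tag
# ("PNRdPPrs" for eReader books, "PDctPPrs" for dictionaries), bytes 76-77
# the big-endian record count, and from byte 78 each record has an 8-byte
# entry whose first four bytes are the '>L' data offset:
import struct

def pdb_record_offsets(contents):
    num, = struct.unpack('>H', contents[76:78])
    return [struct.unpack('>L', contents[78+i*8:78+i*8+4])[0] for i in xrange(num)]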
# cleanup unicode filenames
# borrowed from calibre from calibre/src/calibre/__init__.py
# added in removal of control (<32) chars
# and removal of . at start and end
# and with some (heavily edited) code from Paul Durrant's kindlenamer.py
def sanitizeFileName(name):
    # substitute filename unfriendly characters
    name = name.replace(u"<",u"[").replace(u">",u"]").replace(u" : ",u" – ").replace(u": ",u" – ").replace(u":",u"—").replace(u"/",u"_").replace(u"\\",u"_").replace(u"|",u"_").replace(u"\"",u"\'")
    # delete control characters
    name = u"".join(char for char in name if ord(char)>=32)
    # white space to single space, delete leading and trailing white space
    name = re.sub(ur"\s", u" ", name).strip()
    # remove leading dots
    while len(name)>0 and name[0] == u".":
        name = name[1:]
    # remove trailing dots (Windows doesn't like them)
    if name.endswith(u'.'):
        name = name[:-1]
    return name

def fixKey(key):
    def fixByte(b):
        return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
    return "".join([chr(fixByte(ord(a))) for a in key])

def deXOR(text, sp, table):
    r=''
    j = sp
    for i in xrange(len(text)):
        r += chr(ord(table[j]) ^ ord(text[i]))
        j = j + 1
        if j == len(table):
            j = 0
    return r
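# Illustrative aside (values checked by hand): fixByte() in fixKey() forces
# each key byte to odd parity by recomputing its top bit from the XOR of all
# eight bits, the parity convention DES key schedules expect:
#   fixByte(0x00) -> 0x80   (no bits set, so the high bit is switched on)
#   fixByte(0x80) -> 0x80   (already odd parity, left unchanged)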
class EreaderProcessor(object):
    def __init__(self, sect, user_key):
        self.section_reader = sect.loadSection
        data = self.section_reader(0)
        version, = struct.unpack('>H', data[0:2])
        self.version = version
        logging.info('eReader file format version %s', version)
        if version != 272 and version != 260 and version != 259:
            raise ValueError('incorrect eReader version %d (error 1)' % version)
        data = self.section_reader(1)
        self.data = data
        des = Des(fixKey(data[0:8]))
        cookie_shuf, cookie_size = struct.unpack('>LL', des.decrypt(data[-8:]))
        if cookie_shuf < 3 or cookie_shuf > 0x14 or cookie_size < 0xf0 or cookie_size > 0x200:
            raise ValueError('incorrect eReader version (error 2)')
        input = des.decrypt(data[-cookie_size:])
        def unshuff(data, shuf):
            r = [''] * len(data)
            j = 0
            for i in xrange(len(data)):
                j = (j + shuf) % len(data)
                r[j] = data[i]
            assert len("".join(r)) == len(data)
            return "".join(r)
        r = unshuff(input[0:-8], cookie_shuf)

        drm_sub_version = struct.unpack('>H', r[0:2])[0]
        self.num_text_pages = struct.unpack('>H', r[2:4])[0] - 1
        self.num_image_pages = struct.unpack('>H', r[26:26+2])[0]
        self.first_image_page = struct.unpack('>H', r[24:24+2])[0]
        # Default values
        self.num_footnote_pages = 0
        self.num_sidebar_pages = 0
        self.first_footnote_page = -1
        self.first_sidebar_page = -1
        if self.version == 272:
            self.num_footnote_pages = struct.unpack('>H', r[46:46+2])[0]
            self.first_footnote_page = struct.unpack('>H', r[44:44+2])[0]
            if (sect.bkType == "Book"):
                self.num_sidebar_pages = struct.unpack('>H', r[38:38+2])[0]
                self.first_sidebar_page = struct.unpack('>H', r[36:36+2])[0]
            # self.num_bookinfo_pages = struct.unpack('>H', r[34:34+2])[0]
            # self.first_bookinfo_page = struct.unpack('>H', r[32:32+2])[0]
            # self.num_chapter_pages = struct.unpack('>H', r[22:22+2])[0]
            # self.first_chapter_page = struct.unpack('>H', r[20:20+2])[0]
            # self.num_link_pages = struct.unpack('>H', r[30:30+2])[0]
            # self.first_link_page = struct.unpack('>H', r[28:28+2])[0]
            # self.num_xtextsize_pages = struct.unpack('>H', r[54:54+2])[0]
            # self.first_xtextsize_page = struct.unpack('>H', r[52:52+2])[0]

            # **before** data record 1 was decrypted and unshuffled, it contained data
            # to create an XOR table and which is used to fix footnote record 0, link records, chapter records, etc
            self.xortable_offset = struct.unpack('>H', r[40:40+2])[0]
            self.xortable_size = struct.unpack('>H', r[42:42+2])[0]
            self.xortable = self.data[self.xortable_offset:self.xortable_offset + self.xortable_size]
        else:
            # Nothing needs to be done
            pass
            # self.num_bookinfo_pages = 0
            # self.num_chapter_pages = 0
            # self.num_link_pages = 0
            # self.num_xtextsize_pages = 0
            # self.first_bookinfo_page = -1
            # self.first_chapter_page = -1
            # self.first_link_page = -1
            # self.first_xtextsize_page = -1

        logging.debug('self.num_text_pages %d', self.num_text_pages)
        logging.debug('self.num_footnote_pages %d, self.first_footnote_page %d', self.num_footnote_pages , self.first_footnote_page)
        logging.debug('self.num_sidebar_pages %d, self.first_sidebar_page %d', self.num_sidebar_pages , self.first_sidebar_page)
        self.flags = struct.unpack('>L', r[4:8])[0]
        reqd_flags = (1<<9) | (1<<7) | (1<<10)
        if (self.flags & reqd_flags) != reqd_flags:
            print "Flags: 0x%X" % self.flags
            raise ValueError('incompatible eReader file')
        des = Des(fixKey(user_key))
        if version == 259:
            if drm_sub_version != 7:
                raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
            encrypted_key_sha = r[44:44+20]
            encrypted_key = r[64:64+8]
        elif version == 260:
            if drm_sub_version != 13 and drm_sub_version != 11:
                raise ValueError('incorrect eReader version %d (error 3)' % drm_sub_version)
            if drm_sub_version == 13:
                encrypted_key = r[44:44+8]
                encrypted_key_sha = r[52:52+20]
            else:
                encrypted_key = r[64:64+8]
                encrypted_key_sha = r[44:44+20]
        elif version == 272:
            encrypted_key = r[172:172+8]
            encrypted_key_sha = r[56:56+20]
        self.content_key = des.decrypt(encrypted_key)
        if sha1(self.content_key).digest() != encrypted_key_sha:
            raise ValueError('Incorrect Name and/or Credit Card')

    def getNumImages(self):
        return self.num_image_pages

    def getImage(self, i):
        sect = self.section_reader(self.first_image_page + i)
        name = sect[4:4+32].strip('\0')
        data = sect[62:]
        return sanitizeFileName(unicode(name,'windows-1252')), data

    # def getChapterNamePMLOffsetData(self):
    #     cv = ''
    #     if self.num_chapter_pages > 0:
    #         for i in xrange(self.num_chapter_pages):
    #             chaps = self.section_reader(self.first_chapter_page + i)
    #             j = i % self.xortable_size
    #             offname = deXOR(chaps, j, self.xortable)
    #             offset = struct.unpack('>L', offname[0:4])[0]
    #             name = offname[4:].strip('\0')
    #             cv += '%d|%s\n' % (offset, name)
    #     return cv

    # def getLinkNamePMLOffsetData(self):
    #     lv = ''
    #     if self.num_link_pages > 0:
    #         for i in xrange(self.num_link_pages):
    #             links = self.section_reader(self.first_link_page + i)
    #             j = i % self.xortable_size
    #             offname = deXOR(links, j, self.xortable)
    #             offset = struct.unpack('>L', offname[0:4])[0]
    #             name = offname[4:].strip('\0')
    #             lv += '%d|%s\n' % (offset, name)
    #     return lv

    # def getExpandedTextSizesData(self):
    #     ts = ''
    #     if self.num_xtextsize_pages > 0:
    #         tsize = deXOR(self.section_reader(self.first_xtextsize_page), 0, self.xortable)
    #         for i in xrange(self.num_text_pages):
    #             xsize = struct.unpack('>H', tsize[0:2])[0]
    #             ts += "%d\n" % xsize
    #             tsize = tsize[2:]
    #     return ts

    # def getBookInfo(self):
    #     bkinfo = ''
    #     if self.num_bookinfo_pages > 0:
    #         info = self.section_reader(self.first_bookinfo_page)
    #         bkinfo = deXOR(info, 0, self.xortable)
    #         bkinfo = bkinfo.replace('\0','|')
    #         bkinfo += '\n'
    #     return bkinfo

    def getText(self):
        des = Des(fixKey(self.content_key))
        r = ''
        for i in xrange(self.num_text_pages):
            logging.debug('get page %d', i)
            r += zlib.decompress(des.decrypt(self.section_reader(1 + i)))

        # now handle footnotes pages
        if self.num_footnote_pages > 0:
            r += '\n'
            # the record 0 of the footnote section must pass through the Xor Table to make it useful
            sect = self.section_reader(self.first_footnote_page)
            fnote_ids = deXOR(sect, 0, self.xortable)
            # the remaining records of the footnote sections need to be decoded with the content_key and zlib inflated
            des = Des(fixKey(self.content_key))
            for i in xrange(1,self.num_footnote_pages):
                logging.debug('get footnotepage %d', i)
                id_len = ord(fnote_ids[2])
                id = fnote_ids[3:3+id_len]
                fmarker = '<footnote id="%s">\n' % id
                fmarker += zlib.decompress(des.decrypt(self.section_reader(self.first_footnote_page + i)))
                fmarker += '\n</footnote>\n'
                r += fmarker
                fnote_ids = fnote_ids[id_len+4:]

        # TODO: Handle dictionary index (?) pages - which are also marked as
        # sidebar_pages (?). For now dictionary sidebars are ignored
        # For dictionaries - record 0 is null terminated strings, followed by
        # blocks of around 62000 bytes and a final block. Not sure of the
        # encoding

        # now handle sidebar pages
        if self.num_sidebar_pages > 0:
            r += '\n'
            # the record 0 of the sidebar section must pass through the Xor Table to make it useful
            sect = self.section_reader(self.first_sidebar_page)
            sbar_ids = deXOR(sect, 0, self.xortable)
            # the remaining records of the sidebar sections need to be decoded with the content_key and zlib inflated
            des = Des(fixKey(self.content_key))
            for i in xrange(1,self.num_sidebar_pages):
                id_len = ord(sbar_ids[2])
                id = sbar_ids[3:3+id_len]
                smarker = '<sidebar id="%s">\n' % id
                smarker += zlib.decompress(des.decrypt(self.section_reader(self.first_sidebar_page + i)))
                smarker += '\n</sidebar>\n'
                r += smarker
                sbar_ids = sbar_ids[id_len+4:]

        return r
def cleanPML(pml):
    # Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
    pml2 = pml
    for k in xrange(128,256):
        badChar = chr(k)
        pml2 = pml2.replace(badChar, '\\a%03d' % k)
    return pml2
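# Illustrative aside: cleanPML() rewrites every cp1252 high-ASCII byte as a
# PML \aNNN escape, so for example:
#   cleanPML('caf\xe9') == 'caf\\a233'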
def decryptBook(infile, outpath, make_pmlz, user_key):
    bookname = os.path.splitext(os.path.basename(infile))[0]
    if make_pmlz:
        # outpath is actually pmlz name
        pmlzname = outpath
        outdir = tempfile.mkdtemp()
        imagedirpath = os.path.join(outdir,u"images")
    else:
        pmlzname = None
        outdir = outpath
        imagedirpath = os.path.join(outdir,bookname + u"_img")

    try:
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        print u"Decoding File"
        sect = Sectionizer(infile, 'PNRdPPrs')
        er = EreaderProcessor(sect, user_key)

        if er.getNumImages() > 0:
            print u"Extracting images"
            if not os.path.exists(imagedirpath):
                os.makedirs(imagedirpath)
            for i in xrange(er.getNumImages()):
                name, contents = er.getImage(i)
                file(os.path.join(imagedirpath, name), 'wb').write(contents)

        print u"Extracting pml"
        pml_string = er.getText()
        pmlfilename = bookname + ".pml"
        file(os.path.join(outdir, pmlfilename),'wb').write(cleanPML(pml_string))
        if pmlzname is not None:
            import zipfile
            import shutil
            print u"Creating PMLZ file {0}".format(os.path.basename(pmlzname))
            myZipFile = zipfile.ZipFile(pmlzname,'w',zipfile.ZIP_STORED, False)
            list = os.listdir(outdir)
            for filename in list:
                localname = filename
                filePath = os.path.join(outdir,filename)
                if os.path.isfile(filePath):
                    myZipFile.write(filePath, localname)
                elif os.path.isdir(filePath):
                    imageList = os.listdir(filePath)
                    localimgdir = os.path.basename(filePath)
                    for image in imageList:
                        localname = os.path.join(localimgdir,image)
                        imagePath = os.path.join(filePath,image)
                        if os.path.isfile(imagePath):
                            myZipFile.write(imagePath, localname)
            myZipFile.close()
            # remove temporary directory
            shutil.rmtree(outdir, True)
            print u"Output is {0}".format(pmlzname)
        else :
            print u"Output is in {0}".format(outdir)
        print "done"
    except ValueError, e:
        print u"Error: {0}".format(e)
        traceback.print_exc()
        return 1
    return 0


def usage():
    print u"Converts DRMed eReader books to PML Source"
    print u"Usage:"
    print u"  erdr2pml [options] infile.pdb [outpath] \"your name\" credit_card_number"
    print u" "
    print u"Options: "
    print u"  -h             prints this message"
    print u"  -p             create PMLZ instead of source folder"
    print u"  --make-pmlz    create PMLZ instead of source folder"
    print u" "
    print u"Note:"
    print u"  if outpath is omitted, creates source in 'infile_Source' folder"
    print u"  if outpath is omitted and pmlz option, creates PMLZ 'infile.pmlz'"
    print u"  if source folder created, images are in infile_img folder"
    print u"  if pmlz file created, images are in images folder"
    print u"  It's enough to enter the last 8 digits of the credit card number"
    return
def getuser_key(name,cc):
    newname = "".join(c for c in name.lower() if c >= 'a' and c <= 'z' or c >= '0' and c <= '9')
    cc = cc.replace(" ","")
    return struct.pack('>LL', binascii.crc32(newname) & 0xffffffff,binascii.crc32(cc[-8:])& 0xffffffff)
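# Illustrative aside (made-up name and card number): for
# getuser_key("John Doe", "1234 5678 9012 3456") the routine keeps the
# lowercase alphanumerics of the name ("johndoe") and the last eight digits
# of the card ("90123456"), CRC32s each, and packs the two checksums with
# struct.pack('>LL', ...) into the 8-byte eReader DES user key.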
def cli_main():
    print u"eRdr2Pml v{0}. Copyright © 2009–2012 The Dark Reverser et al.".format(__version__)

    argv=unicode_argv()
    try:
        opts, args = getopt.getopt(argv[1:], "hp", ["make-pmlz"])
    except getopt.GetoptError, err:
        print err.args[0]
        usage()
        return 1
    make_pmlz = False
    for o, a in opts:
        if o == "-h":
            usage()
            return 0
        elif o == "-p":
            make_pmlz = True
        elif o == "--make-pmlz":
            make_pmlz = True

    if len(args)!=3 and len(args)!=4:
        usage()
        return 1

    if len(args)==3:
        infile, name, cc = args
        if make_pmlz:
            outpath = os.path.splitext(infile)[0] + u".pmlz"
        else:
            outpath = os.path.splitext(infile)[0] + u"_Source"
    elif len(args)==4:
        infile, outpath, name, cc = args

    print getuser_key(name,cc).encode('hex')

    return decryptBook(infile, outpath, make_pmlz, getuser_key(name,cc))


if __name__ == "__main__":
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(cli_main())
File diff suppressed because it is too large
@@ -1,63 +1,127 @@
#! /usr/bin/python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# For use with Topaz Scripts Version 2.6

import sys
import csv
import os
import math
import getopt
from struct import pack
from struct import unpack


class PParser(object):
    def __init__(self, gd, flatxml, meta_array):
        self.gd = gd
        self.flatdoc = flatxml.split('\n')
        self.docSize = len(self.flatdoc)
        self.temp = []

        self.ph = -1
        self.pw = -1
        startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
        for p in startpos:
            (name, argres) = self.lineinDoc(p)
            self.ph = max(self.ph, int(argres))
        startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
        for p in startpos:
            (name, argres) = self.lineinDoc(p)
            self.pw = max(self.pw, int(argres))

        if self.ph <= 0:
            self.ph = int(meta_array.get('pageHeight', '11000'))
        if self.pw <= 0:
            self.pw = int(meta_array.get('pageWidth', '8500'))

        res = []
        startpos = self.posinDoc('info.glyph.x')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.x', p)
            res.extend(argres)
        self.gx = res

        res = []
        startpos = self.posinDoc('info.glyph.y')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.y', p)
            res.extend(argres)
        self.gy = res

        res = []
        startpos = self.posinDoc('info.glyph.glyphID')
        for p in startpos:
            argres = self.getDataatPos('info.glyph.glyphID', p)
            res.extend(argres)
        self.gid = res


class DocParser(object):
    def __init__(self, flatxml, classlst, fileid, bookDir, gdict, fixedimage):
        self.id = os.path.basename(fileid).replace('.dat','')
        self.svgcount = 0
        self.docList = flatxml.split('\n')
        self.docSize = len(self.docList)
        self.classList = {}
        self.bookDir = bookDir
        self.gdict = gdict
        tmpList = classlst.split('\n')
        for pclass in tmpList:
            if pclass != '':
                # remove the leading period from the css name
                cname = pclass[1:]
                self.classList[cname] = True
        self.fixedimage = fixedimage
        self.ocrtext = []
        self.link_id = []
        self.link_title = []
        self.link_page = []
        self.link_href = []
        self.link_type = []
        self.dehyphen_rootid = []
        self.paracont_stemid = []
        self.parastems_stemid = []

    def getGlyph(self, gid):
        result = ''
        id='id="gl%d"' % gid
        return self.gdict.lookup(id)

    def glyphs_to_image(self, glyphList):

        def extract(path, key):
            b = path.find(key) + len(key)
            e = path.find(' ',b)
            return int(path[b:e])

        svgDir = os.path.join(self.bookDir,'svg')
        imgDir = os.path.join(self.bookDir,'img')
        imgname = self.id + '_%04d.svg' % self.svgcount
        imgfile = os.path.join(imgDir,imgname)

        # get glyph information
        gxList = self.getData('info.glyph.x',0,-1)
        gyList = self.getData('info.glyph.y',0,-1)
        gidList = self.getData('info.glyph.glyphID',0,-1)

        gids = []
        maxws = []
        maxhs = []
        xs = []
        ys = []
        gdefs = []

        # get path definitions, positions, dimensions for each glyph
        # that makes up the image, and find min x and min y to reposition origin
        minx = -1
        miny = -1
        for j in glyphList:
            gid = gidList[j]
            gids.append(gid)

            xs.append(gxList[j])
            if minx == -1: minx = gxList[j]
            else : minx = min(minx, gxList[j])

            ys.append(gyList[j])
            if miny == -1: miny = gyList[j]
            else : miny = min(miny, gyList[j])

            path = self.getGlyph(gid)
            gdefs.append(path)

            maxws.append(extract(path,'width='))
            maxhs.append(extract(path,'height='))

        # change the origin to minx, miny and calc max height and width
        maxw = maxws[0] + xs[0] - minx
        maxh = maxhs[0] + ys[0] - miny
        for j in xrange(0, len(xs)):
            xs[j] = xs[j] - minx
            ys[j] = ys[j] - miny
            maxw = max( maxw, (maxws[j] + xs[j]) )
            maxh = max( maxh, (maxhs[j] + ys[j]) )

        # open the image file for output
        ifile = open(imgfile,'w')
        ifile.write('<?xml version="1.0" standalone="no"?>\n')
        ifile.write('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
        ifile.write('<svg width="%dpx" height="%dpx" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (math.floor(maxw/10), math.floor(maxh/10), maxw, maxh))
        ifile.write('<defs>\n')
        for j in xrange(0,len(gdefs)):
            ifile.write(gdefs[j])
        ifile.write('</defs>\n')
        for j in xrange(0,len(gids)):
            ifile.write('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (gids[j], xs[j], ys[j]))
        ifile.write('</svg>')
        ifile.close()

        return 0

    # return tag at line pos in document
    def lineinDoc(self, pos) :
        if (pos >= 0) and (pos < self.docSize) :
            item = self.flatdoc[pos]
            item = self.docList[pos]
            if item.find('=') >= 0:
                (name, argres) = item.split('=',1)
            else :
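# Illustrative aside (not part of the commit): the per-paragraph SVG files
# that glyphs_to_image() writes have this overall shape (glyph id and
# coordinates made up for the example):
#   <svg width="64px" height="12px" viewBox="0 0 640 120" ...>
#     <defs> ... one <path id="gl42" .../> per glyph, from gdict.lookup() ... </defs>
#     <use xlink:href="#gl42" x="0" y="0" />
#   </svg>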
@@ -65,6 +129,7 @@ class PParser(object):
                argres = ''
        return name, argres

    # find tag in doc if within pos to end inclusive
    def findinDoc(self, tagpath, pos, end) :
        result = None
@@ -74,7 +139,7 @@
        end = min(self.docSize, end)
        foundat = -1
        for j in xrange(pos, end):
            item = self.flatdoc[j]
            item = self.docList[j]
            if item.find('=') >= 0:
                (name, argres) = item.split('=',1)
            else :
@@ -86,6 +151,7 @@ class PParser(object):
                    break
        return foundat, result

    # return list of start positions for the tagpath
    def posinDoc(self, tagpath):
        startpos = []
@@ -98,152 +164,638 @@ class PParser(object):
            pos = foundpos + 1
        return startpos

    def getData(self, path):
        result = None
        cnt = len(self.flatdoc)
        for j in xrange(cnt):
            item = self.flatdoc[j]
            if item.find('=') >= 0:
                (name, argt) = item.split('=')
                argres = argt.split('|')
            else:
                name = item
                argres = []
            if (name.endswith(path)):
                result = argres
                break
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        return result

    def getDataatPos(self, path, pos):
        result = None
        item = self.flatdoc[pos]
        if item.find('=') >= 0:
            (name, argt) = item.split('=')
            argres = argt.split('|')
        else:
            name = item
            argres = []
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        if (name.endswith(path)):
            result = argres
        return result

    def getDataTemp(self, path):
        result = None
        cnt = len(self.temp)
        for j in xrange(cnt):
            item = self.temp[j]
            if item.find('=') >= 0:
                (name, argt) = item.split('=')
                argres = argt.split('|')
            else:
                name = item
                argres = []
            if (name.endswith(path)):
                result = argres
                self.temp.pop(j)
                break
        if (len(argres) > 0) :
            for j in xrange(0,len(argres)):
                argres[j] = int(argres[j])
        return result

    # returns a vector of integers for the tagpath
    def getData(self, tagpath, pos, end):
        argres=[]
        (foundat, argt) = self.findinDoc(tagpath, pos, end)
        if (argt != None) and (len(argt) > 0) :
            argList = argt.split('|')
            argres = [ int(strval) for strval in argList]
        return argres
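# Illustrative aside (not part of the commit): the "flat xml" these parsers
# walk is one dotted tagpath per line, '=' before the value and '|' between
# multiple values, e.g. (values made up):
#   page.h=11000
#   info.glyph.glyphID=12|12|47|3
# The new getData() locates a line via findinDoc() and returns its values as
# a list of integers.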
    def getImages(self):
        result = []
        self.temp = self.flatdoc
        while (self.getDataTemp('img') != None):
            h = self.getDataTemp('img.h')[0]
            w = self.getDataTemp('img.w')[0]
            x = self.getDataTemp('img.x')[0]
            y = self.getDataTemp('img.y')[0]
            src = self.getDataTemp('img.src')[0]
            result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
        return result

    # get the class
    def getClass(self, pclass):
        nclass = pclass

        # class names are an issue given topaz may start them with numerals (not allowed),
        # use a mix of cases (which causes problems for some browsers), and actually
        # attach numbers after "_reclustered*" to the end to deal with classes that inherit
        # from a base class (but then not actually provide all of these _reclustered
        # classes in the stylesheet!)

        # so we clean this up by lowercasing, prepending 'cl-', and getting any baseclass
        # that exists in the stylesheet first, and then adding this specific class
        # after

        # also some class names have spaces in them so need to convert to dashes
        if nclass != None :
            nclass = nclass.replace(' ','-')
            classres = ''
            nclass = nclass.lower()
            nclass = 'cl-' + nclass
            baseclass = ''
            # graphic is the base class for captions
            if nclass.find('cl-cap-') >=0 :
                classres = 'graphic' + ' '
            else :
                # strip to find baseclass
                p = nclass.find('_')
                if p > 0 :
                    baseclass = nclass[0:p]
                    if baseclass in self.classList:
                        classres += baseclass + ' '
            classres += nclass
            nclass = classres
        return nclass


    # develop a sorted description of the starting positions of
    # groups and regions on the page, as well as the page type
    def PageDescription(self):

        def compare(x, y):
            (xtype, xval) = x
            (ytype, yval) = y
            if xval > yval:
                return 1
            if xval == yval:
                return 0
            return -1

        result = []
        (pos, pagetype) = self.findinDoc('page.type',0,-1)

        groupList = self.posinDoc('page.group')
        groupregionList = self.posinDoc('page.group.region')
        pageregionList = self.posinDoc('page.region')
        # integrate into one list
        for j in groupList:
            result.append(('grpbeg',j))
        for j in groupregionList:
            result.append(('gregion',j))
        for j in pageregionList:
            result.append(('pregion',j))
        result.sort(compare)

        # insert group end and page end indicators
        inGroup = False
        j = 0
        while True:
            if j == len(result): break
            rtype = result[j][0]
            rval = result[j][1]
            if not inGroup and (rtype == 'grpbeg') :
                inGroup = True
                j = j + 1
            elif inGroup and (rtype in ('grpbeg', 'pregion')):
                result.insert(j,('grpend',rval))
                inGroup = False
            else:
                j = j + 1
        if inGroup:
            result.append(('grpend',-1))
        result.append(('pageend', -1))
        return pagetype, result
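# Illustrative aside (hypothetical class names): getClass() turns a Topaz
# class like "3Heavy_reclustered1" into "cl-3heavy_reclustered1", and if the
# stylesheet also defines the base class "cl-3heavy" it is prepended, giving
# class="cl-3heavy cl-3heavy_reclustered1".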
    def getGlyphs(self):
        result = []
        if (self.gid != None) and (len(self.gid) > 0):
            glyphs = []
            for j in set(self.gid):
                glyphs.append(j)
            glyphs.sort()
            for gid in glyphs:
                id='id="gl%d"' % gid
                path = self.gd.lookup(id)
                if path:
                    result.append(id + ' ' + path)
        return result

    # build a description of the paragraph
    def getParaDescription(self, start, end, regtype):

        result = []

        # paragraph
        (pos, pclass) = self.findinDoc('paragraph.class',start,end)

        pclass = self.getClass(pclass)

        # if paragraph uses extratokens (extra glyphs) then make it fixed
        (pos, extraglyphs) = self.findinDoc('paragraph.extratokens',start,end)

        # build up a description of the paragraph in result and return it
        # first check for the basic - all words paragraph
        (pos, sfirst) = self.findinDoc('paragraph.firstWord',start,end)
        (pos, slast) = self.findinDoc('paragraph.lastWord',start,end)
        if (sfirst != None) and (slast != None) :
            first = int(sfirst)
            last = int(slast)

            makeImage = (regtype == 'vertical') or (regtype == 'table')
            makeImage = makeImage or (extraglyphs != None)
            if self.fixedimage:
                makeImage = makeImage or (regtype == 'fixed')

            if (pclass != None):
                makeImage = makeImage or (pclass.find('.inverted') >= 0)
                if self.fixedimage :
                    makeImage = makeImage or (pclass.find('cl-f-') >= 0)

            # before creating an image make sure glyph info exists
            gidList = self.getData('info.glyph.glyphID',0,-1)

            makeImage = makeImage & (len(gidList) > 0)

            if not makeImage :
                # standard all word paragraph
                for wordnum in xrange(first, last):
                    result.append(('ocr', wordnum))
                return pclass, result

            # convert paragraph to svg image
            # translate first and last word into first and last glyphs
            # and generate inline image and include it
            glyphList = []
            firstglyphList = self.getData('word.firstGlyph',0,-1)
            gidList = self.getData('info.glyph.glyphID',0,-1)
            firstGlyph = firstglyphList[first]
            if last < len(firstglyphList):
                lastGlyph = firstglyphList[last]
            else :
                lastGlyph = len(gidList)

            # handle case of white space paragraphs with no actual glyphs in them
            # by reverting to text based paragraph
            if firstGlyph >= lastGlyph:
                # revert to standard text based paragraph
                for wordnum in xrange(first, last):
                    result.append(('ocr', wordnum))
                return pclass, result

            for glyphnum in xrange(firstGlyph, lastGlyph):
                glyphList.append(glyphnum)
            # include any extratokens if they exist
            (pos, sfg) = self.findinDoc('extratokens.firstGlyph',start,end)
            (pos, slg) = self.findinDoc('extratokens.lastGlyph',start,end)
            if (sfg != None) and (slg != None):
                for glyphnum in xrange(int(sfg), int(slg)):
                    glyphList.append(glyphnum)
            num = self.svgcount
            self.glyphs_to_image(glyphList)
            self.svgcount += 1
            result.append(('svg', num))
            return pclass, result

        # this type of paragraph may be made up of multiple spans, inline
        # word monograms (images), and words with semantic meaning,
        # plus glyphs used to form starting letter of first word

        # need to parse this type line by line
        line = start + 1
        word_class = ''

        # if end is -1 then we must search to end of document
        if end == -1 :
            end = self.docSize

        # seems some xml has last* coming before first* so we have to
        # handle any order
        sp_first = -1
        sp_last = -1

        gl_first = -1
        gl_last = -1

        ws_first = -1
        ws_last = -1

        word_class = ''

        word_semantic_type = ''

        while (line < end) :

            (name, argres) = self.lineinDoc(line)

            if name.endswith('span.firstWord') :
                sp_first = int(argres)

            elif name.endswith('span.lastWord') :
                sp_last = int(argres)

            elif name.endswith('word.firstGlyph') :
                gl_first = int(argres)

            elif name.endswith('word.lastGlyph') :
                gl_last = int(argres)

            elif name.endswith('word_semantic.firstWord'):
                ws_first = int(argres)

            elif name.endswith('word_semantic.lastWord'):
                ws_last = int(argres)

            elif name.endswith('word.class'):
                # we only handle spaceafter word class
                try:
                    (cname, space) = argres.split('-',1)
                    if space == '' : space = '0'
                    if (cname == 'spaceafter') and (int(space) > 0) :
                        word_class = 'sa'
                except:
                    pass

            elif name.endswith('word.img.src'):
                result.append(('img' + word_class, int(argres)))
                word_class = ''

            elif name.endswith('region.img.src'):
                result.append(('img' + word_class, int(argres)))

            if (sp_first != -1) and (sp_last != -1):
                for wordnum in xrange(sp_first, sp_last):
                    result.append(('ocr', wordnum))
                sp_first = -1
                sp_last = -1

            if (gl_first != -1) and (gl_last != -1):
                glyphList = []
                for glyphnum in xrange(gl_first, gl_last):
                    glyphList.append(glyphnum)
                num = self.svgcount
                self.glyphs_to_image(glyphList)
                self.svgcount += 1
                result.append(('svg', num))
                gl_first = -1
                gl_last = -1

            if (ws_first != -1) and (ws_last != -1):
                for wordnum in xrange(ws_first, ws_last):
                    result.append(('ocr', wordnum))
                ws_first = -1
                ws_last = -1

            line += 1

        return pclass, result
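# Illustrative aside (made-up indices): getParaDescription() returns
# (pclass, pdesc) where pdesc is a list of (kind, number) tuples, e.g.
#   [('ocr', 14), ('ocr', 15), ('imgsa', 3), ('svg', 0)]
# 'ocr' indexes self.ocrtext, 'img'/'imgsa' reference img/imgNNNN.jpg, and
# 'svg' references an inline glyph image written by glyphs_to_image().
# buildParagraph() below consumes exactly these tuples.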
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
|
def buildParagraph(self, pclass, pdesc, type, regtype) :
|
||||||
mlst = []
|
parares = ''
|
||||||
pp = PParser(gdict, flat_xml, meta_array)
|
sep =''
|
||||||
mlst.append('<?xml version="1.0" standalone="no"?>\n')
|
|
||||||
if (raw):
|
classres = ''
|
||||||
mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
|
if pclass :
|
||||||
mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
|
classres = ' class="' + pclass + '"'
|
||||||
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
|
||||||
else:
|
br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical')
|
||||||
mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
|
|
||||||
mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
|
handle_links = len(self.link_id) > 0
|
||||||
mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
|
|
||||||
mlst.append('<script><![CDATA[\n')
|
if (type == 'full') or (type == 'begin') :
|
||||||
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
|
parares += '<p' + classres + '>'
|
||||||
mlst.append('var dpi=%d;\n' % scaledpi)
|
|
||||||
if (previd) :
|
if (type == 'end'):
|
||||||
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
|
parares += ' '
|
||||||
if (nextid) :
|
|
||||||
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
|
lstart = len(parares)
|
||||||
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
|
|
||||||
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
|
cnt = len(pdesc)
|
||||||
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
|
|
||||||
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
|
for j in xrange( 0, cnt) :
|
||||||
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
|
|
||||||
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
|
(wtype, num) = pdesc[j]
|
||||||
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
|
|
||||||
mlst.append('window.onload=setsize;\n')
|
if wtype == 'ocr' :
|
||||||
mlst.append(']]></script>\n')
|
try:
|
||||||
mlst.append('</head>\n')
|
word = self.ocrtext[num]
|
||||||
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
|
except:
|
||||||
mlst.append('<div style="white-space:nowrap;">\n')
|
word = ""
|
||||||
if previd == None:
|
|
||||||
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
|
sep = ' '
|
||||||
|
|
||||||
|
if handle_links:
|
||||||
|
link = self.link_id[num]
|
||||||
|
if (link > 0):
|
||||||
|
linktype = self.link_type[link-1]
|
||||||
|
title = self.link_title[link-1]
|
||||||
|
if (title == "") or (parares.rfind(title) < 0):
|
||||||
|
title=parares[lstart:]
|
||||||
|
if linktype == 'external' :
|
||||||
|
linkhref = self.link_href[link-1]
|
||||||
|
linkhtml = '<a href="%s">' % linkhref
|
||||||
|
else :
|
||||||
|
if len(self.link_page) >= link :
|
||||||
|
ptarget = self.link_page[link-1] - 1
|
||||||
|
linkhtml = '<a href="#page%04d">' % ptarget
|
||||||
|
else :
|
||||||
|
# just link to the current page
|
||||||
|
linkhtml = '<a href="#' + self.id + '">'
|
||||||
|
linkhtml += title + '</a>'
|
||||||
|
pos = parares.rfind(title)
|
||||||
|
if pos >= 0:
|
||||||
|
parares = parares[0:pos] + linkhtml + parares[pos+len(title):]
|
||||||
|
else :
|
||||||
|
parares += linkhtml
|
||||||
|
lstart = len(parares)
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
elif (link < 0) :
|
||||||
|
if word == '_link_' : word = ''
|
||||||
|
|
||||||
|
if word == '_lb_':
|
||||||
|
if ((num-1) in self.dehyphen_rootid ) or handle_links:
|
||||||
|
word = ''
|
||||||
|
sep = ''
|
||||||
|
elif br_lb :
|
||||||
|
word = '<br />\n'
|
||||||
|
sep = ''
|
||||||
|
else :
|
||||||
|
word = '\n'
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
if num in self.dehyphen_rootid :
|
||||||
|
word = word[0:-1]
|
||||||
|
sep = ''
|
||||||
|
|
||||||
|
parares += word + sep
|
||||||
|
|
||||||
|
elif wtype == 'img' :
|
||||||
|
sep = ''
|
||||||
|
parares += '<img src="img/img%04d.jpg" alt="" />' % num
|
||||||
|
parares += sep
|
||||||
|
|
||||||
|
elif wtype == 'imgsa' :
|
||||||
|
sep = ' '
|
||||||
|
parares += '<img src="img/img%04d.jpg" alt="" />' % num
|
||||||
|
parares += sep
|
||||||
|
|
||||||
|
elif wtype == 'svg' :
|
||||||
|
sep = ''
|
||||||
|
parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num
|
||||||
|
parares += sep
|
||||||
|
|
||||||
|
if len(sep) > 0 : parares = parares[0:-1]
|
||||||
|
if (type == 'full') or (type == 'end') :
|
||||||
|
parares += '</p>'
|
||||||
|
return parares
|
||||||
|
|
||||||
|
|
||||||
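For orientation, a minimal sketch (not part of the commit; values invented) of the word-description list that buildParagraph consumes, assuming a page with no links:

# pdesc pairs a word type with an index: into self.ocrtext for 'ocr',
# or an image number for 'img'/'imgsa'/'svg'.
pdesc = [('ocr', 0), ('ocr', 1), ('img', 3)]
# With self.ocrtext = ['Hello', 'world'], pclass = 'cl-text' and type 'full',
# buildParagraph would render roughly:
#   <p class="cl-text">Hello world <img src="img/img0003.jpg" alt="" /></p>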
    def buildTOCEntry(self, pdesc) :
        parares = ''
        sep =''
        tocentry = ''
        handle_links = len(self.link_id) > 0

        lstart = 0

        cnt = len(pdesc)
        for j in xrange( 0, cnt) :

            (wtype, num) = pdesc[j]

            if wtype == 'ocr' :
                word = self.ocrtext[num]
                sep = ' '

                if handle_links:
                    link = self.link_id[num]
                    if (link > 0):
                        linktype = self.link_type[link-1]
                        title = self.link_title[link-1]
                        title = title.rstrip('. ')
                        alt_title = parares[lstart:]
                        alt_title = alt_title.strip()
                        # now strip off the actual printed page number
                        alt_title = alt_title.rstrip('01234567890ivxldIVXLD-.')
                        alt_title = alt_title.rstrip('. ')
                        # skip over any external links - can't have them in a book's toc
                        if linktype == 'external' :
                            title = ''
                            alt_title = ''
                            linkpage = ''
                        else :
                            if len(self.link_page) >= link :
                                ptarget = self.link_page[link-1] - 1
                                linkpage = '%04d' % ptarget
                            else :
                                # just link to the current page
                                linkpage = self.id[4:]
                        if len(alt_title) >= len(title):
                            title = alt_title
                        if title != '' and linkpage != '':
                            tocentry += title + '|' + linkpage + '\n'
                        lstart = len(parares)
                        if word == '_link_' : word = ''
                    elif (link < 0) :
                        if word == '_link_' : word = ''

                if word == '_lb_':
                    word = ''
                    sep = ''

                if num in self.dehyphen_rootid :
                    word = word[0:-1]
                    sep = ''

                parares += word + sep

            else :
                continue

        return tocentry
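A small usage sketch (not from the diff; titles and page numbers invented): buildTOCEntry emits one newline-terminated 'title|pagenum' record per TOC link, and generateBook later splits these exactly this way to build toc.xhtml:

tocinfo = 'Chapter One|0004\nChapter Two|0017\n'
toclst = tocinfo.split('\n')
toclst.pop()                       # drop the empty trailing element
for entry in toclst:
    title, pagenum = entry.split('|')
    print '<h3><a href="page%04d.xhtml">%s</a></h3>' % (int(pagenum), title)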
    # walk the document tree collecting the information needed
    # to build an html page using the ocrText

    def process(self):

        tocinfo = ''
        hlst = []

        # get the ocr text
        (pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
        if argres : self.ocrtext = argres.split('|')

        # get information to dehyphenate the text
        self.dehyphen_rootid = self.getData('info.dehyphen.rootID',0,-1)

        # determine if first paragraph is continued from previous page
        (pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1)
        first_para_continued = (self.parastems_stemid != None)

        # determine if last paragraph is continued onto the next page
        (pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1)
        last_para_continued = (self.paracont_stemid != None)

        # collect link ids
        self.link_id = self.getData('info.word.link_id',0,-1)

        # collect link destination page numbers
        self.link_page = self.getData('info.links.page',0,-1)

        # collect link types (container versus external)
        (pos, argres) = self.findinDoc('info.links.type',0,-1)
        if argres : self.link_type = argres.split('|')

        # collect link destinations
        (pos, argres) = self.findinDoc('info.links.href',0,-1)
        if argres : self.link_href = argres.split('|')

        # collect link titles
        (pos, argres) = self.findinDoc('info.links.title',0,-1)
        if argres :
            self.link_title = argres.split('|')
        else:
            self.link_title.append('')

        # get a description of the starting points of the regions
        # and groups on the page
        (pagetype, pageDesc) = self.PageDescription()
        regcnt = len(pageDesc) - 1

        anchorSet = False
        breakSet = False
        inGroup = False

        # process each region on the page and convert what you can to html

        for j in xrange(regcnt):

            (etype, start) = pageDesc[j]
            (ntype, end) = pageDesc[j+1]

            # set anchor for link target on this page
            if not anchorSet and not first_para_continued:
                hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
                hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
                anchorSet = True

            # handle groups of graphics with text captions
            if (etype == 'grpbeg'):
                (pos, grptype) = self.findinDoc('group.type', start, end)
                if grptype != None:
                    if grptype == 'graphic':
                        gcstr = ' class="' + grptype + '"'
                        hlst.append('<div' + gcstr + '>')
                        inGroup = True

            elif (etype == 'grpend'):
                if inGroup:
                    hlst.append('</div>\n')
                    inGroup = False

            else:
                (pos, regtype) = self.findinDoc('region.type',start,end)

                if regtype == 'graphic' :
                    (pos, simgsrc) = self.findinDoc('img.src',start,end)
                    if simgsrc:
                        if inGroup:
                            hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
                        else:
                            hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))

                elif regtype == 'chapterheading' :
                    (pclass, pdesc) = self.getParaDescription(start,end, regtype)
                    if not breakSet:
                        hlst.append('<div style="page-break-after: always;">&nbsp;</div>\n')
                        breakSet = True
                    tag = 'h1'
                    if pclass and (len(pclass) >= 7):
                        if pclass[3:7] == 'ch1-' : tag = 'h1'
                        if pclass[3:7] == 'ch2-' : tag = 'h2'
                        if pclass[3:7] == 'ch3-' : tag = 'h3'
                        hlst.append('<' + tag + ' class="' + pclass + '">')
                    else:
                        hlst.append('<' + tag + '>')
                    hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
                    hlst.append('</' + tag + '>')

                elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
                    ptype = 'full'
                    # check to see if this is a continuation from the previous page
                    if first_para_continued :
                        ptype = 'end'
                        first_para_continued = False
                    (pclass, pdesc) = self.getParaDescription(start,end, regtype)
                    if pclass and (len(pclass) >= 6) and (ptype == 'full'):
                        tag = 'p'
                        if pclass[3:6] == 'h1-' : tag = 'h4'
                        if pclass[3:6] == 'h2-' : tag = 'h5'
                        if pclass[3:6] == 'h3-' : tag = 'h6'
                        hlst.append('<' + tag + ' class="' + pclass + '">')
                        hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
                        hlst.append('</' + tag + '>')
                    else :
                        hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))

                elif (regtype == 'tocentry') :
                    ptype = 'full'
                    if first_para_continued :
                        ptype = 'end'
                        first_para_continued = False
                    (pclass, pdesc) = self.getParaDescription(start,end, regtype)
                    tocinfo += self.buildTOCEntry(pdesc)
                    hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))

                elif (regtype == 'vertical') or (regtype == 'table') :
                    ptype = 'full'
                    if inGroup:
                        ptype = 'middle'
                    if first_para_continued :
                        ptype = 'end'
                        first_para_continued = False
                    (pclass, pdesc) = self.getParaDescription(start, end, regtype)
                    hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))

                elif (regtype == 'synth_fcvr.center'):
                    (pos, simgsrc) = self.findinDoc('img.src',start,end)
                    if simgsrc:
                        hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))

                else :
                    print '          Making region type', regtype,
                    (pos, temp) = self.findinDoc('paragraph',start,end)
                    (pos2, temp) = self.findinDoc('span',start,end)
                    if pos != -1 or pos2 != -1:
                        print ' a "text" region'
                        orig_regtype = regtype
                        regtype = 'fixed'
                        ptype = 'full'
                        # check to see if this is a continuation from the previous page
                        if first_para_continued :
                            ptype = 'end'
                            first_para_continued = False
                        (pclass, pdesc) = self.getParaDescription(start,end, regtype)
                        if not pclass:
                            if orig_regtype.endswith('.right')     : pclass = 'cl-right'
                            elif orig_regtype.endswith('.center')  : pclass = 'cl-center'
                            elif orig_regtype.endswith('.left')    : pclass = 'cl-left'
                            elif orig_regtype.endswith('.justify') : pclass = 'cl-justify'
                        if pclass and (ptype == 'full') and (len(pclass) >= 6):
                            tag = 'p'
                            if pclass[3:6] == 'h1-' : tag = 'h4'
                            if pclass[3:6] == 'h2-' : tag = 'h5'
                            if pclass[3:6] == 'h3-' : tag = 'h6'
                            hlst.append('<' + tag + ' class="' + pclass + '">')
                            hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
                            hlst.append('</' + tag + '>')
                        else :
                            hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
                    else :
                        print ' a "graphic" region'
                        (pos, simgsrc) = self.findinDoc('img.src',start,end)
                        if simgsrc:
                            hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))

        htmlpage = "".join(hlst)
        if last_para_continued :
            if htmlpage[-4:] == '</p>':
                htmlpage = htmlpage[0:-4]
            last_para_continued = False

        return htmlpage, tocinfo


def convert2HTML(flatxml, classlst, fileid, bookDir, gdict, fixedimage):
    # create a document parser
    dp = DocParser(flatxml, classlst, fileid, bookDir, gdict, fixedimage)
    htmlpage, tocinfo = dp.process()
    return htmlpage, tocinfo

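A condensed sketch of the round trip (mirroring the call made from genbook.py's generateBook; flat_xml, classlst, bookDir and gd are placeholders built elsewhere in that script):

pagehtml, tocinfo = convert2HTML(flat_xml, classlst, 'page0001.dat', bookDir, gd, False)
# pagehtml is one page's xhtml fragment; tocinfo holds raw 'title|page' records.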
@@ -1,148 +1,82 @@
 #! /usr/bin/python
 # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

-class Unbuffered:
-    def __init__(self, stream):
-        self.stream = stream
-    def write(self, data):
-        self.stream.write(data)
-        self.stream.flush()
-    def __getattr__(self, attr):
-        return getattr(self.stream, attr)
-
 import sys
-sys.stdout=Unbuffered(sys.stdout)
-
 import csv
 import os
 import getopt
 from struct import pack
 from struct import unpack

-class TpzDRMError(Exception):
-    pass
-
-# local support routines
-if 'calibre' in sys.modules:
-    inCalibre = True
-else:
-    inCalibre = False
-
-if inCalibre :
-    from calibre_plugins.dedrm import convert2xml
-    from calibre_plugins.dedrm import flatxml2html
-    from calibre_plugins.dedrm import flatxml2svg
-    from calibre_plugins.dedrm import stylexml2css
-else :
-    import convert2xml
-    import flatxml2html
-    import flatxml2svg
-    import stylexml2css
-
-# global switch
-buildXML = False
-
-# Get a 7 bit encoded number from a file
-def readEncodedNumber(file):
-    flag = False
-    c = file.read(1)
-    if (len(c) == 0):
-        return None
-    data = ord(c)
-    if data == 0xFF:
-        flag = True
-        c = file.read(1)
-        if (len(c) == 0):
-            return None
-        data = ord(c)
-    if data >= 0x80:
-        datax = (data & 0x7F)
-        while data >= 0x80 :
-            c = file.read(1)
-            if (len(c) == 0):
-                return None
-            data = ord(c)
-            datax = (datax <<7) + (data & 0x7F)
-        data = datax
-    if flag:
-        data = -data
-    return data
-
-# Get a length prefixed string from the file
-def lengthPrefixString(data):
-    return encodeNumber(len(data))+data
-
-def readString(file):
-    stringLength = readEncodedNumber(file)
-    if (stringLength == None):
-        return None
-    sv = file.read(stringLength)
-    if (len(sv) != stringLength):
-        return ""
-    return unpack(str(stringLength)+"s",sv)[0]
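As an aside (not part of the commit): readEncodedNumber above decodes a big-endian 7-bit varint with an optional 0xFF negative marker. A sketch of a matching encoder, written here only to document the format -- the tools' own encodeNumber (referenced by lengthPrefixString) lives in convert2xml.py and may differ in detail:

def encode_number_sketch(n):
    # illustrative inverse of readEncodedNumber, not the shipped encodeNumber
    prefix = ''
    if n < 0:
        prefix = '\xff'            # decoder sets its negative flag on 0xFF
        n = -n
    groups = [n & 0x7F]            # last byte: high bit clear ends the loop
    n >>= 7
    while n:
        groups.append((n & 0x7F) | 0x80)   # continuation bytes: high bit set
        n >>= 7
    out = list(reversed(groups))
    if out[0] == 0xFF:
        out.insert(0, 0x80)        # keep a leading 0xFF from reading as the sign marker
    return prefix + ''.join(chr(b) for b in out)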
-
-def getMetaArray(metaFile):
-    # parse the meta file
-    result = {}
-    fo = file(metaFile,'rb')
-    size = readEncodedNumber(fo)
-    for i in xrange(size):
-        tag = readString(fo)
-        value = readString(fo)
-        result[tag] = value
-        # print tag, value
-    fo.close()
-    return result
-
-# dictionary of all text strings by index value
-class Dictionary(object):
-    def __init__(self, dictFile):
-        self.filename = dictFile
-        self.size = 0
-        self.fo = file(dictFile,'rb')
-        self.stable = []
-        self.size = readEncodedNumber(self.fo)
-        for i in xrange(self.size):
-            self.stable.append(self.escapestr(readString(self.fo)))
-        self.pos = 0
-    def escapestr(self, str):
-        str = str.replace('&','&amp;')
-        str = str.replace('<','&lt;')
-        str = str.replace('>','&gt;')
-        str = str.replace('=','&#61;')
-        return str
-    def lookup(self,val):
-        if ((val >= 0) and (val < self.size)) :
-            self.pos = val
-            return self.stable[self.pos]
-        else:
-            print "Error: %d outside of string table limits" % val
-            raise TpzDRMError('outside of string table limits')
-            # sys.exit(-1)
-    def getSize(self):
-        return self.size
-    def getPos(self):
-        return self.pos
-
-class PageDimParser(object):
-    def __init__(self, flatxml):
-        self.flatdoc = flatxml.split('\n')
-    # find tag if within pos to end inclusive
+class PParser(object):
+    def __init__(self, gd, flatxml, meta_array):
+        self.gd = gd
+        self.flatdoc = flatxml.split('\n')
+        self.docSize = len(self.flatdoc)
+        self.temp = []
+
+        self.ph = -1
+        self.pw = -1
+        startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
+        for p in startpos:
+            (name, argres) = self.lineinDoc(p)
+            self.ph = max(self.ph, int(argres))
+        startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
+        for p in startpos:
+            (name, argres) = self.lineinDoc(p)
+            self.pw = max(self.pw, int(argres))
+
+        if self.ph <= 0:
+            self.ph = int(meta_array.get('pageHeight', '11000'))
+        if self.pw <= 0:
+            self.pw = int(meta_array.get('pageWidth', '8500'))
+
+        res = []
+        startpos = self.posinDoc('info.glyph.x')
+        for p in startpos:
+            argres = self.getDataatPos('info.glyph.x', p)
+            res.extend(argres)
+        self.gx = res
+
+        res = []
+        startpos = self.posinDoc('info.glyph.y')
+        for p in startpos:
+            argres = self.getDataatPos('info.glyph.y', p)
+            res.extend(argres)
+        self.gy = res
+
+        res = []
+        startpos = self.posinDoc('info.glyph.glyphID')
+        for p in startpos:
+            argres = self.getDataatPos('info.glyph.glyphID', p)
+            res.extend(argres)
+        self.gid = res
+
+    # return tag at line pos in document
+    def lineinDoc(self, pos) :
+        if (pos >= 0) and (pos < self.docSize) :
+            item = self.flatdoc[pos]
+            if item.find('=') >= 0:
+                (name, argres) = item.split('=',1)
+            else :
+                name = item
+                argres = ''
+        return name, argres
+
+    # find tag in doc if within pos to end inclusive
     def findinDoc(self, tagpath, pos, end) :
         result = None
-        docList = self.flatdoc
-        cnt = len(docList)
         if end == -1 :
-            end = cnt
+            end = self.docSize
         else:
-            end = min(cnt,end)
+            end = min(self.docSize, end)
         foundat = -1
         for j in xrange(pos, end):
-            item = docList[j]
+            item = self.flatdoc[j]
             if item.find('=') >= 0:
-                (name, argres) = item.split('=')
+                (name, argres) = item.split('=',1)
             else :
                 name = item
                 argres = ''
@@ -151,44 +85,19 @@ class PageDimParser(object):
                 foundat = j
                 break
         return foundat, result
-    def process(self):
-        (pos, sph) = self.findinDoc('page.h',0,-1)
-        (pos, spw) = self.findinDoc('page.w',0,-1)
-        if (sph == None): sph = '-1'
-        if (spw == None): spw = '-1'
-        return sph, spw
-
-def getPageDim(flatxml):
-    # create a document parser
-    dp = PageDimParser(flatxml)
-    (ph, pw) = dp.process()
-    return ph, pw
-
-class GParser(object):
-    def __init__(self, flatxml):
-        self.flatdoc = flatxml.split('\n')
-        self.dpi = 1440
-        self.gh = self.getData('info.glyph.h')
-        self.gw = self.getData('info.glyph.w')
-        self.guse = self.getData('info.glyph.use')
-        if self.guse :
-            self.count = len(self.guse)
-        else :
-            self.count = 0
-        self.gvtx = self.getData('info.glyph.vtx')
-        self.glen = self.getData('info.glyph.len')
-        self.gdpi = self.getData('info.glyph.dpi')
-        self.vx = self.getData('info.vtx.x')
-        self.vy = self.getData('info.vtx.y')
-        self.vlen = self.getData('info.len.n')
-        if self.vlen :
-            self.glen.append(len(self.vlen))
-        elif self.glen:
-            self.glen.append(0)
-        if self.vx :
-            self.gvtx.append(len(self.vx))
-        elif self.gvtx :
-            self.gvtx.append(0)
+    # return list of start positions for the tagpath
+    def posinDoc(self, tagpath):
+        startpos = []
+        pos = 0
+        res = ""
+        while res != None :
+            (foundpos, res) = self.findinDoc(tagpath, pos, -1)
+            if res != None :
+                startpos.append(foundpos)
+            pos = foundpos + 1
+        return startpos

     def getData(self, path):
         result = None
         cnt = len(self.flatdoc)
@@ -200,522 +109,141 @@ class GParser(object):
             else:
                 name = item
                 argres = []
-            if (name == path):
+            if (name.endswith(path)):
                 result = argres
                 break
         if (len(argres) > 0) :
             for j in xrange(0,len(argres)):
                 argres[j] = int(argres[j])
         return result
-    def getGlyphDim(self, gly):
-        if self.gdpi[gly] == 0:
-            return 0, 0
-        maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
-        maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
-        return maxh, maxw
-    def getPath(self, gly):
-        path = ''
-        if (gly < 0) or (gly >= self.count):
-            return path
-        tx = self.vx[self.gvtx[gly]:self.gvtx[gly+1]]
-        ty = self.vy[self.gvtx[gly]:self.gvtx[gly+1]]
-        p = 0
-        for k in xrange(self.glen[gly], self.glen[gly+1]):
-            if (p == 0):
-                zx = tx[0:self.vlen[k]+1]
-                zy = ty[0:self.vlen[k]+1]
-            else:
-                zx = tx[self.vlen[k-1]+1:self.vlen[k]+1]
-                zy = ty[self.vlen[k-1]+1:self.vlen[k]+1]
-            p += 1
-            j = 0
-            while ( j < len(zx) ):
-                if (j == 0):
-                    # Start Position.
-                    path += 'M %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly])
-                elif (j <= len(zx)-3):
-                    # Cubic Bezier Curve
-                    path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[j+2] * self.dpi / self.gdpi[gly], zy[j+2] * self.dpi / self.gdpi[gly])
-                    j += 2
-                elif (j == len(zx)-2):
-                    # Cubic Bezier Curve to Start Position
-                    path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
-                    j += 1
-                elif (j == len(zx)-1):
-                    # Quadratic Bezier Curve to Start Position
-                    path += 'Q %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
-                    j += 1
-                j += 1
-            path += 'z'
-        return path
+    def getDataatPos(self, path, pos):
+        result = None
+        item = self.flatdoc[pos]
+        if item.find('=') >= 0:
+            (name, argt) = item.split('=')
+            argres = argt.split('|')
+        else:
+            name = item
+            argres = []
+        if (len(argres) > 0) :
+            for j in xrange(0,len(argres)):
+                argres[j] = int(argres[j])
+        if (name.endswith(path)):
+            result = argres
+        return result
+
+    def getDataTemp(self, path):
+        result = None
+        cnt = len(self.temp)
+        for j in xrange(cnt):
+            item = self.temp[j]
+            if item.find('=') >= 0:
+                (name, argt) = item.split('=')
+                argres = argt.split('|')
+            else:
+                name = item
+                argres = []
+            if (name.endswith(path)):
+                result = argres
+                self.temp.pop(j)
+                break
+        if (len(argres) > 0) :
+            for j in xrange(0,len(argres)):
+                argres[j] = int(argres[j])
+        return result
+
+    def getImages(self):
+        result = []
+        self.temp = self.flatdoc
+        while (self.getDataTemp('img') != None):
+            h = self.getDataTemp('img.h')[0]
+            w = self.getDataTemp('img.w')[0]
+            x = self.getDataTemp('img.x')[0]
+            y = self.getDataTemp('img.y')[0]
+            src = self.getDataTemp('img.src')[0]
+            result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
+        return result
+
+    def getGlyphs(self):
+        result = []
+        if (self.gid != None) and (len(self.gid) > 0):
+            glyphs = []
+            for j in set(self.gid):
+                glyphs.append(j)
+            glyphs.sort()
+            for gid in glyphs:
+                id='id="gl%d"' % gid
+                path = self.gd.lookup(id)
+                if path:
+                    result.append(id + ' ' + path)
+        return result
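To make the parsing methods above concrete, a tiny invented example (not from the diff) of the "flat xml" they walk -- one dotted tagpath per line, '=' separating the value, '|' separating list entries:

flat_xml = 'page.h=11000\npage.w=8500\ninfo.glyph.glyphID=12|12|47'
# With this document, posinDoc('page.h') would return [0];
# lineinDoc(0) yields ('page.h', '11000'); and
# getDataatPos('info.glyph.glyphID', 2) returns the integer list [12, 12, 47].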
-# dictionary of all text strings by index value
-class GlyphDict(object):
-    def __init__(self):
-        self.gdict = {}
-    def lookup(self, id):
-        # id='id="gl%d"' % val
-        if id in self.gdict:
-            return self.gdict[id]
-        return None
-    def addGlyph(self, val, path):
-        id='id="gl%d"' % val
-        self.gdict[id] = path
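A two-line usage sketch of the removed GlyphDict, mirroring how genbook.py and PParser.getGlyphs() key it (the path string is invented):

gd = GlyphDict()
gd.addGlyph(5, '<path id="gl5" d="M 0 0 z" fill="black" />')   # stored under 'id="gl5"'
svgdef = gd.lookup('id="gl5"')    # getGlyphs() builds this same key string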
-def generateBook(bookDir, raw, fixedimage):
-    # sanity check Topaz file extraction
-    if not os.path.exists(bookDir) :
-        print "Can not find directory with unencrypted book"
-        return 1
-
-    dictFile = os.path.join(bookDir,'dict0000.dat')
-    if not os.path.exists(dictFile) :
-        print "Can not find dict0000.dat file"
-        return 1
-
-    pageDir = os.path.join(bookDir,'page')
-    if not os.path.exists(pageDir) :
-        print "Can not find page directory in unencrypted book"
-        return 1
-
-    imgDir = os.path.join(bookDir,'img')
-    if not os.path.exists(imgDir) :
-        print "Can not find image directory in unencrypted book"
-        return 1
-
-    glyphsDir = os.path.join(bookDir,'glyphs')
-    if not os.path.exists(glyphsDir) :
-        print "Can not find glyphs directory in unencrypted book"
-        return 1
-
-    metaFile = os.path.join(bookDir,'metadata0000.dat')
-    if not os.path.exists(metaFile) :
-        print "Can not find metadata0000.dat in unencrypted book"
-        return 1
-
-    svgDir = os.path.join(bookDir,'svg')
-    if not os.path.exists(svgDir) :
-        os.makedirs(svgDir)
-
-    if buildXML:
-        xmlDir = os.path.join(bookDir,'xml')
-        if not os.path.exists(xmlDir) :
-            os.makedirs(xmlDir)
-
-    otherFile = os.path.join(bookDir,'other0000.dat')
-    if not os.path.exists(otherFile) :
-        print "Can not find other0000.dat in unencrypted book"
-        return 1
-
-    print "Updating to color images if available"
-    spath = os.path.join(bookDir,'color_img')
-    dpath = os.path.join(bookDir,'img')
-    filenames = os.listdir(spath)
-    filenames = sorted(filenames)
-    for filename in filenames:
-        imgname = filename.replace('color','img')
-        sfile = os.path.join(spath,filename)
-        dfile = os.path.join(dpath,imgname)
-        imgdata = file(sfile,'rb').read()
-        file(dfile,'wb').write(imgdata)
-
-    print "Creating cover.jpg"
-    isCover = False
-    cpath = os.path.join(bookDir,'img')
-    cpath = os.path.join(cpath,'img0000.jpg')
-    if os.path.isfile(cpath):
-        cover = file(cpath, 'rb').read()
-        cpath = os.path.join(bookDir,'cover.jpg')
-        file(cpath, 'wb').write(cover)
-        isCover = True
-
-    print 'Processing Dictionary'
-    dict = Dictionary(dictFile)
-
-    print 'Processing Meta Data and creating OPF'
-    meta_array = getMetaArray(metaFile)
-
-    # replace special chars in title and authors like & < >
-    title = meta_array.get('Title','No Title Provided')
-    title = title.replace('&','&amp;')
-    title = title.replace('<','&lt;')
-    title = title.replace('>','&gt;')
-    meta_array['Title'] = title
-    authors = meta_array.get('Authors','No Authors Provided')
-    authors = authors.replace('&','&amp;')
-    authors = authors.replace('<','&lt;')
-    authors = authors.replace('>','&gt;')
-    meta_array['Authors'] = authors
-
-    if buildXML:
-        xname = os.path.join(xmlDir, 'metadata.xml')
-        mlst = []
-        for key in meta_array:
-            mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
-        metastr = "".join(mlst)
-        mlst = None
-        file(xname, 'wb').write(metastr)
-
-    print 'Processing StyleSheet'
-
-    # get some scaling info from metadata to use while processing styles
-    # and first page info
-
-    fontsize = '135'
-    if 'fontSize' in meta_array:
-        fontsize = meta_array['fontSize']
-
-    # also get the size of a normal text page
-    # get the total number of pages unpacked as a safety check
-    filenames = os.listdir(pageDir)
-    numfiles = len(filenames)
-
-    spage = '1'
-    if 'firstTextPage' in meta_array:
-        spage = meta_array['firstTextPage']
-    pnum = int(spage)
-    if pnum >= numfiles or pnum < 0:
-        # metadata is wrong so just select a page near the front
-        # 10% of the book to get a normal text page
-        pnum = int(0.10 * numfiles)
-    # print "first normal text page is", spage
-
-    # get page height and width from first text page for use in stylesheet scaling
-    pname = 'page%04d.dat' % (pnum + 1)
-    fname = os.path.join(pageDir,pname)
-    flat_xml = convert2xml.fromData(dict, fname)
-
-    (ph, pw) = getPageDim(flat_xml)
-    if (ph == '-1') or (ph == '0') : ph = '11000'
-    if (pw == '-1') or (pw == '0') : pw = '8500'
-    meta_array['pageHeight'] = ph
-    meta_array['pageWidth'] = pw
-    if 'fontSize' not in meta_array.keys():
-        meta_array['fontSize'] = fontsize
-
-    # process other.dat for css info and for map of page files to svg images
-    # this map is needed because some pages actually are made up of multiple
-    # pageXXXX.xml files
-    xname = os.path.join(bookDir, 'style.css')
-    flat_xml = convert2xml.fromData(dict, otherFile)
-
-    # extract info.original.pid to get original page information
-    pageIDMap = {}
-    pageidnums = stylexml2css.getpageIDMap(flat_xml)
-    if len(pageidnums) == 0:
-        filenames = os.listdir(pageDir)
-        numfiles = len(filenames)
-        for k in range(numfiles):
-            pageidnums.append(k)
-    # create a map from page ids to list of page file nums to process for that page
-    for i in range(len(pageidnums)):
-        id = pageidnums[i]
-        if id in pageIDMap.keys():
-            pageIDMap[id].append(i)
-        else:
-            pageIDMap[id] = [i]
-
-    # now get the css info
-    cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
-    file(xname, 'wb').write(cssstr)
-    if buildXML:
-        xname = os.path.join(xmlDir, 'other0000.xml')
-        file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
-
-    print 'Processing Glyphs'
-    gd = GlyphDict()
-    filenames = os.listdir(glyphsDir)
-    filenames = sorted(filenames)
-    glyfname = os.path.join(svgDir,'glyphs.svg')
-    glyfile = open(glyfname, 'w')
-    glyfile.write('<?xml version="1.0" standalone="no"?>\n')
-    glyfile.write('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
-    glyfile.write('<svg width="512" height="512" viewBox="0 0 511 511" xmlns="http://www.w3.org/2000/svg" version="1.1">\n')
-    glyfile.write('<title>Glyphs for %s</title>\n' % meta_array['Title'])
-    glyfile.write('<defs>\n')
-    counter = 0
-    for filename in filenames:
-        # print '   ', filename
-        print '.',
-        fname = os.path.join(glyphsDir,filename)
-        flat_xml = convert2xml.fromData(dict, fname)
-
-        if buildXML:
-            xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
-            file(xname, 'wb').write(convert2xml.getXML(dict, fname))
-
-        gp = GParser(flat_xml)
-        for i in xrange(0, gp.count):
-            path = gp.getPath(i)
-            maxh, maxw = gp.getGlyphDim(i)
-            fullpath = '<path id="gl%d" d="%s" fill="black" /><!-- width=%d height=%d -->\n' % (counter * 256 + i, path, maxw, maxh)
-            glyfile.write(fullpath)
-            gd.addGlyph(counter * 256 + i, fullpath)
-        counter += 1
-    glyfile.write('</defs>\n')
-    glyfile.write('</svg>\n')
-    glyfile.close()
-    print " "
-
-    # start up the html
-    # also build up tocentries while processing html
-    htmlFileName = "book.html"
-    hlst = []
-    hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
-    hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
-    hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
-    hlst.append('<head>\n')
-    hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
-    hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
-    hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
-    hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
-    if 'ASIN' in meta_array:
-        hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
-    if 'GUID' in meta_array:
-        hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
-    hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
-    hlst.append('</head>\n<body>\n')
-
-    print 'Processing Pages'
-    # Books are at 1440 DPI.  This is rendering at twice that size for
-    # readability when rendering to the screen.
-    scaledpi = 1440.0
-
-    filenames = os.listdir(pageDir)
-    filenames = sorted(filenames)
-    numfiles = len(filenames)
-
-    xmllst = []
-    elst = []
-
-    for filename in filenames:
-        # print '   ', filename
-        print ".",
-        fname = os.path.join(pageDir,filename)
-        flat_xml = convert2xml.fromData(dict, fname)
-
-        # keep flat_xml for later svg processing
-        xmllst.append(flat_xml)
-
-        if buildXML:
-            xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
-            file(xname, 'wb').write(convert2xml.getXML(dict, fname))
-
-        # first get the html
-        pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
-        elst.append(tocinfo)
-        hlst.append(pagehtml)
-
-    # finish up the html string and output it
-    hlst.append('</body>\n</html>\n')
-    htmlstr = "".join(hlst)
-    hlst = None
-    file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
-
-    print " "
-    print 'Extracting Table of Contents from Amazon OCR'
-
-    # first create a table of contents file for the svg images
-    tlst = []
-    tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
-    tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
-    tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
-    tlst.append('<head>\n')
-    tlst.append('<title>' + meta_array['Title'] + '</title>\n')
-    tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
-    tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
-    if 'ASIN' in meta_array:
-        tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
-    if 'GUID' in meta_array:
-        tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
-    tlst.append('</head>\n')
-    tlst.append('<body>\n')
-
-    tlst.append('<h2>Table of Contents</h2>\n')
-    start = pageidnums[0]
-    if (raw):
-        startname = 'page%04d.svg' % start
-    else:
-        startname = 'page%04d.xhtml' % start
-
-    tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
-    # build up a table of contents for the svg xhtml output
-    tocentries = "".join(elst)
-    elst = None
-    toclst = tocentries.split('\n')
-    toclst.pop()
-    for entry in toclst:
-        print entry
-        title, pagenum = entry.split('|')
-        id = pageidnums[int(pagenum)]
-        if (raw):
-            fname = 'page%04d.svg' % id
-        else:
-            fname = 'page%04d.xhtml' % id
-        tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
-    tlst.append('</body>\n')
-    tlst.append('</html>\n')
-    tochtml = "".join(tlst)
-    file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
-
-    # now create index_svg.xhtml that points to all required files
-    slst = []
-    slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
-    slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
-    slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
-    slst.append('<head>\n')
-    slst.append('<title>' + meta_array['Title'] + '</title>\n')
-    slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
-    slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
-    if 'ASIN' in meta_array:
-        slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
-    if 'GUID' in meta_array:
-        slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
-    slst.append('</head>\n')
-    slst.append('<body>\n')
-
-    print "Building svg images of each book page"
-    slst.append('<h2>List of Pages</h2>\n')
-    slst.append('<div>\n')
-    idlst = sorted(pageIDMap.keys())
-    numids = len(idlst)
-    cnt = len(idlst)
-    previd = None
-    for j in range(cnt):
-        pageid = idlst[j]
-        if j < cnt - 1:
-            nextid = idlst[j+1]
-        else:
-            nextid = None
-        print '.',
-        pagelst = pageIDMap[pageid]
-        flst = []
-        for page in pagelst:
-            flst.append(xmllst[page])
-        flat_svg = "".join(flst)
-        flst=None
-        svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
-        if (raw) :
-            pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
-            slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
-        else :
-            pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
-            slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
-        previd = pageid
-        pfile.write(svgxml)
-        pfile.close()
-        counter += 1
-    slst.append('</div>\n')
-    slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
-    slst.append('</body>\n</html>\n')
-    svgindex = "".join(slst)
-    slst = None
-    file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
-
-    print " "
-
-    # build the opf file
-    opfname = os.path.join(bookDir, 'book.opf')
-    olst = []
-    olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
-    olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
-    # adding metadata
-    olst.append('   <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
-    if 'GUID' in meta_array:
-        olst.append('      <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
-    if 'ASIN' in meta_array:
-        olst.append('      <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
-    if 'oASIN' in meta_array:
-        olst.append('      <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
-    olst.append('      <dc:title>' + meta_array['Title'] + '</dc:title>\n')
-    olst.append('      <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
-    olst.append('      <dc:language>en</dc:language>\n')
-    olst.append('      <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
-    if isCover:
-        olst.append('      <meta name="cover" content="bookcover"/>\n')
-    olst.append('   </metadata>\n')
-    olst.append('<manifest>\n')
-    olst.append('   <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
-    olst.append('   <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
-    # adding image files to manifest
-    filenames = os.listdir(imgDir)
-    filenames = sorted(filenames)
-    for filename in filenames:
-        imgname, imgext = os.path.splitext(filename)
-        if imgext == '.jpg':
-            imgext = 'jpeg'
-        if imgext == '.svg':
-            imgext = 'svg+xml'
-        olst.append('   <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
-    if isCover:
-        olst.append('   <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
-    olst.append('</manifest>\n')
-    # adding spine
-    olst.append('<spine>\n   <itemref idref="book" />\n</spine>\n')
-    if isCover:
-        olst.append('   <guide>\n')
-        olst.append('      <reference href="cover.jpg" type="cover" title="Cover"/>\n')
-        olst.append('   </guide>\n')
-    olst.append('</package>\n')
-    opfstr = "".join(olst)
-    olst = None
-    file(opfname, 'wb').write(opfstr)
-
-    print 'Processing Complete'
-
-    return 0
-
-def usage():
-    print "genbook.py generates a book from the extract Topaz Files"
-    print "Usage:"
-    print "    genbook.py [-r] [-h] [--fixed-image] <bookDir> "
-    print "  "
-    print "Options:"
-    print "  -h            : help - print this usage message"
-    print "  -r            : generate raw svg files (not wrapped in xhtml)"
-    print "  --fixed-image : generate any Fixed Area as an svg image in the html"
-    print "  "
-
-def main(argv):
-    bookDir = ''
-    if len(argv) == 0:
-        argv = sys.argv
-
-    try:
-        opts, args = getopt.getopt(argv[1:], "rh:",["fixed-image"])
-
-    except getopt.GetoptError, err:
-        print str(err)
-        usage()
-        return 1
-
-    if len(opts) == 0 and len(args) == 0 :
-        usage()
-        return 1
-
-    raw = 0
-    fixedimage = True
-    for o, a in opts:
-        if o =="-h":
-            usage()
-            return 0
-        if o =="-r":
-            raw = 1
-        if o =="--fixed-image":
-            fixedimage = True
-
-    bookDir = args[0]
-
-    rv = generateBook(bookDir, raw, fixedimage)
-    return rv
-
-if __name__ == '__main__':
-    sys.exit(main(''))
+def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
+    mlst = []
+    pp = PParser(gdict, flat_xml, meta_array)
+    mlst.append('<?xml version="1.0" standalone="no"?>\n')
+    if (raw):
+        mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
+        mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
+        mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
+    else:
+        mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
+        mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
+        mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
+        mlst.append('<script><![CDATA[\n')
+        mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
+        mlst.append('var dpi=%d;\n' % scaledpi)
+        if (previd) :
+            mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
+        if (nextid) :
+            mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
+        mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
+        mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
+        mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
+        mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
+        mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
+        mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
+        mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
+        mlst.append('window.onload=setsize;\n')
+        mlst.append(']]></script>\n')
+        mlst.append('</head>\n')
+        mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
+        mlst.append('<div style="white-space:nowrap;">\n')
+        if previd == None:
+            mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
+        else:
+            mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
+
+        mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
+    if (pp.gid != None):
+        mlst.append('<defs>\n')
+        gdefs = pp.getGlyphs()
+        for j in xrange(0,len(gdefs)):
+            mlst.append(gdefs[j])
+        mlst.append('</defs>\n')
+    img = pp.getImages()
+    if (img != None):
+        for j in xrange(0,len(img)):
+            mlst.append(img[j])
+    if (pp.gid != None):
+        for j in xrange(0,len(pp.gid)):
+            mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
+    if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
+        xpos = "%d" % (pp.pw // 3)
+        ypos = "%d" % (pp.ph // 3)
+        mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
+    if (raw) :
+        mlst.append('</svg>')
+    else :
+        mlst.append('</svg></a>\n')
+        if nextid == None:
+            mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
+        else :
+            mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
+        mlst.append('</div>\n')
+        mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
+        mlst.append('</body>\n')
+        mlst.append('</html>\n')
+    return "".join(mlst)
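One note on the viewer script emitted above: setsize() computes the page width as pw/dpi inches, so zooming in works by lowering dpi. A quick check of the arithmetic (page width value invented):

pw, dpi = 8500, 1440.0
print pw / dpi            # 5.90in  -- default scale
print pw / (dpi * 0.8)    # 7.38in  -- after zoomin(): smaller dpi, larger page
print pw / (dpi * 1.25)   # 4.72in  -- after zoomout()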
File diff suppressed because it is too large
@ -3,47 +3,54 @@
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
# ignoblekeygen.pyw, version 2.5
|
# ignobleepub.pyw, version 3.8
|
||||||
# Copyright © 2009-2010 i♥cabbages
|
# Copyright © 2009-2010 by i♥cabbages
|
||||||
|
|
||||||
# Released under the terms of the GNU General Public Licence, version 3
|
# Released under the terms of the GNU General Public Licence, version 3
|
||||||
# <http://www.gnu.org/licenses/>
|
# <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
# Modified 2010–2013 by some_updates, DiapDealer and Apprentice Alf
|
# Modified 2010–2013 by some_updates, DiapDealer and Apprentice Alf
|
||||||
|
|
||||||
# Windows users: Before running this program, you must first install Python.
|
# Windows users: Before running this program, you must first install Python 2.6
|
||||||
# We recommend ActiveState Python 2.7.X for Windows (x86) from
|
# from <http://www.python.org/download/> and PyCrypto from
|
||||||
# http://www.activestate.com/activepython/downloads.
|
# <http://www.voidspace.org.uk/python/modules.shtml#pycrypto> (make sure to
|
||||||
# You must also install PyCrypto from
|
# install the version for Python 2.6). Save this script file as
|
||||||
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
# ineptepub.pyw and double-click on it to run it.
|
||||||
# (make certain to install the version for Python 2.7).
|
|
||||||
# Then save this script file as ignoblekeygen.pyw and double-click on it to run it.
|
|
||||||
#
|
#
|
||||||
# Mac OS X users: Save this script file as ignoblekeygen.pyw. You can run this
|
# Mac OS X users: Save this script file as ineptepub.pyw. You can run this
|
||||||
# program from the command line (python ignoblekeygen.pyw) or by double-clicking
|
# program from the command line (pythonw ineptepub.pyw) or by double-clicking
|
||||||
# it when it has been associated with PythonLauncher.
|
# it when it has been associated with PythonLauncher.
|
||||||
|
|
||||||
# Revision history:
|
# Revision history:
|
||||||
# 1 - Initial release
|
# 1 - Initial release
|
||||||
# 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
|
# 2 - Added OS X support by using OpenSSL when available
|
||||||
# 2.1 - Allow Windows versions of libcrypto to be found
|
# 3 - screen out improper key lengths to prevent segfaults on Linux
|
||||||
# 2.2 - On Windows try PyCrypto first and then OpenSSL next
|
# 3.1 - Allow Windows versions of libcrypto to be found
|
||||||
# 2.3 - Modify interface to allow use of import
|
# 3.2 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
|
||||||
# 2.4 - Improvements to UI and now works in plugins
|
# 3.3 - On Windows try PyCrypto first, OpenSSL next
|
||||||
# 2.5 - Additional improvement for unicode and plugin support
|
# 3.4 - Modify interface to allow use with import
|
||||||
# 2.6 - moved unicode_argv call inside main for Windows DeDRM compatibility
|
# 3.5 - Fix for potential problem with PyCrypto
|
||||||
# 2.7 - Work if TkInter is missing
|
# 3.6 - Revised to allow use in calibre plugins to eliminate need for duplicate code
|
||||||
|
# 3.7 - Tweaked to match ineptepub more closely
|
||||||
|
# 3.8 - Fixed to retain zip file metadata (e.g. file modification date)
|
||||||
|
# 3.9 - moved unicode_argv call inside main for Windows DeDRM compatibility
|
||||||
|
# 4.0 - Work if TkInter is missing
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Generate Barnes & Noble EPUB user key from name and credit card number.
|
Decrypt Barnes & Noble encrypted ePub books.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__version__ = "2.7"
|
__version__ = "4.0"
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import hashlib
|
import traceback
|
||||||
|
import zlib
|
||||||
|
import zipfile
|
||||||
|
from zipfile import ZipInfo, ZipFile, ZIP_STORED, ZIP_DEFLATED
|
||||||
|
from contextlib import closing
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
# Wrap a stream so that output gets flushed immediately
|
# Wrap a stream so that output gets flushed immediately
|
||||||
# and also make sure that any unicode strings get
|
# and also make sure that any unicode strings get
|
||||||
|
@ -75,8 +82,8 @@ def unicode_argv():
|
||||||
|
|
||||||
# Versions 2.x of Python don't support Unicode in sys.argv on
|
# Versions 2.x of Python don't support Unicode in sys.argv on
|
||||||
# Windows, with the underlying Windows API instead replacing multi-byte
|
# Windows, with the underlying Windows API instead replacing multi-byte
|
||||||
# characters with '?'. So use shell32.GetCommandLineArgvW to get sys.argv
|
# characters with '?'.
|
||||||
# as a list of Unicode strings and encode them as utf-8
|
|
||||||
|
|
||||||
from ctypes import POINTER, byref, cdll, c_int, windll
|
from ctypes import POINTER, byref, cdll, c_int, windll
|
||||||
from ctypes.wintypes import LPCWSTR, LPWSTR
|
from ctypes.wintypes import LPCWSTR, LPWSTR
|
||||||
|
@ -97,9 +104,7 @@ def unicode_argv():
|
||||||
start = argc.value - len(sys.argv)
|
start = argc.value - len(sys.argv)
|
||||||
return [argv[i] for i in
|
return [argv[i] for i in
|
||||||
xrange(start, argc.value)]
|
xrange(start, argc.value)]
|
||||||
# if we don't have any arguments at all, just pass back script name
|
return [u"ineptepub.py"]
|
||||||
# this should never happen
|
|
||||||
return [u"ignoblekeygen.py"]
|
|
||||||
else:
|
else:
|
||||||
argvencoding = sys.stdin.encoding
|
argvencoding = sys.stdin.encoding
|
||||||
if argvencoding == None:
|
if argvencoding == None:
|
||||||
|
@ -140,26 +145,29 @@ def _load_crypto_libcrypto():
|
||||||
func.argtypes = argtypes
|
func.argtypes = argtypes
|
||||||
return func
|
return func
|
||||||
|
|
||||||
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
|
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
|
||||||
[c_char_p, c_int, AES_KEY_p])
|
[c_char_p, c_int, AES_KEY_p])
|
||||||
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
c_int])
|
c_int])
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
def __init__(self, userkey, iv):
|
def __init__(self, userkey):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
self._iv = iv
|
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
||||||
|
raise IGNOBLEError('AES improper key used')
|
||||||
|
return
|
||||||
key = self._key = AES_KEY()
|
key = self._key = AES_KEY()
|
||||||
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
|
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise IGNOBLEError('Failed to initialize AES Encrypt key')
|
raise IGNOBLEError('Failed to initialize AES key')
|
||||||
|
|
||||||
def encrypt(self, data):
|
def decrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
|
iv = ("\x00" * self._blocksize)
|
||||||
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise IGNOBLEError('AES encryption failed')
|
raise IGNOBLEError('AES decryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
return AES
|
return AES
|
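The key-length guard added in this hunk is what revision entry 3 above ("screen out improper key lengths to prevent segfaults on Linux") refers to: anything other than a 16-, 24- or 32-byte key would be handed straight to OpenSSL's AES_set_decrypt_key. A minimal sketch of the same check, with our own names:

def check_aes_key(userkey):
    # AES accepts only 128-, 192- or 256-bit keys; reject anything else
    # before it reaches AES_set_decrypt_key and can crash libcrypto
    if len(userkey) not in (16, 24, 32):
        raise ValueError('AES improper key used')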
||||||
|
@ -168,11 +176,11 @@ def _load_crypto_pycrypto():
|
||||||
from Crypto.Cipher import AES as _AES
|
from Crypto.Cipher import AES as _AES
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
def __init__(self, key, iv):
|
def __init__(self, key):
|
||||||
self._aes = _AES.new(key, _AES.MODE_CBC, iv)
|
self._aes = _AES.new(key, _AES.MODE_CBC, '\x00'*16)
|
||||||
|
|
||||||
def encrypt(self, data):
|
def decrypt(self, data):
|
||||||
return self._aes.encrypt(data)
|
return self._aes.decrypt(data)
|
||||||
|
|
||||||
return AES
|
return AES
|
||||||
|
|
||||||
|
@ -191,29 +199,123 @@ def _load_crypto():
|
||||||
|
|
||||||
AES = _load_crypto()
|
AES = _load_crypto()
|
||||||
|
|
||||||
def normalize_name(name):
|
META_NAMES = ('mimetype', 'META-INF/rights.xml', 'META-INF/encryption.xml')
|
||||||
return ''.join(x for x in name.lower() if x != ' ')
|
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
||||||
|
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
||||||
|
|
||||||
|
class Decryptor(object):
|
||||||
|
def __init__(self, bookkey, encryption):
|
||||||
|
enc = lambda tag: '{%s}%s' % (NSMAP['enc'], tag)
|
||||||
|
self._aes = AES(bookkey)
|
||||||
|
encryption = etree.fromstring(encryption)
|
||||||
|
self._encrypted = encrypted = set()
|
||||||
|
expr = './%s/%s/%s' % (enc('EncryptedData'), enc('CipherData'),
|
||||||
|
enc('CipherReference'))
|
||||||
|
for elem in encryption.findall(expr):
|
||||||
|
path = elem.get('URI', None)
|
||||||
|
if path is not None:
|
||||||
|
path = path.encode('utf-8')
|
||||||
|
encrypted.add(path)
|
||||||
|
|
||||||
def generate_key(name, ccn):
|
def decompress(self, bytes):
|
||||||
# remove spaces and case from name and CC numbers.
|
dc = zlib.decompressobj(-15)
|
||||||
if type(name)==unicode:
|
bytes = dc.decompress(bytes)
|
||||||
name = name.encode('utf-8')
|
ex = dc.decompress('Z') + dc.flush()
|
||||||
if type(ccn)==unicode:
|
if ex:
|
||||||
ccn = ccn.encode('utf-8')
|
bytes = bytes + ex
|
||||||
|
return bytes
|
||||||
|
|
||||||
name = normalize_name(name) + '\x00'
|
def decrypt(self, path, data):
|
||||||
ccn = normalize_name(ccn) + '\x00'
|
if path in self._encrypted:
|
||||||
|
data = self._aes.decrypt(data)[16:]
|
||||||
name_sha = hashlib.sha1(name).digest()[:16]
|
data = data[:-ord(data[-1])]
|
||||||
ccn_sha = hashlib.sha1(ccn).digest()[:16]
|
data = self.decompress(data)
|
||||||
both_sha = hashlib.sha1(name + ccn).digest()
|
return data
|
||||||
aes = AES(ccn_sha, name_sha)
|
|
||||||
crypt = aes.encrypt(both_sha + ('\x0c' * 0x0c))
|
|
||||||
userkey = hashlib.sha1(crypt).digest()
|
|
||||||
return userkey.encode('base64')
|
|
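The left-hand column's generate_key pipeline reads more easily in one piece. A self-contained restatement of the same derivation (Python 2, PyCrypto assumed available; the function name is ours):

import hashlib
from Crypto.Cipher import AES

def bn_user_key(name, ccn):
    # lower-case, strip spaces, then NUL-terminate (what normalize_name does)
    name = ''.join(c for c in name.lower() if c != ' ') + '\x00'
    ccn = ''.join(c for c in ccn.lower() if c != ' ') + '\x00'
    name_sha = hashlib.sha1(name).digest()[:16]    # used as the AES IV
    ccn_sha = hashlib.sha1(ccn).digest()[:16]      # used as the AES key
    both_sha = hashlib.sha1(name + ccn).digest()   # 20 bytes
    aes = AES.new(ccn_sha, AES.MODE_CBC, name_sha)
    crypt = aes.encrypt(both_sha + '\x0c' * 0x0c)  # pad to two AES blocks
    return hashlib.sha1(crypt).digest().encode('base64')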
||||||
|
|
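Decryptor.decompress above inflates raw deflate data: the -15 passed to zlib.decompressobj selects a 32K window with no zlib header, which is how zip entries store their payload. A self-contained illustration of that flag:

import zlib

payload = 'hello ' * 100
raw = zlib.compress(payload)[2:-4]   # strip the 2-byte header and 4-byte adler32
dc = zlib.decompressobj(-15)         # negative wbits selects raw deflate
assert dc.decompress(raw) + dc.flush() == payload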
||||||
|
# check file to see whether it's probably a Barnes &amp; Noble encrypted ePub
|
||||||
|
def ignobleBook(inpath):
|
||||||
|
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
||||||
|
namelist = set(inf.namelist())
|
||||||
|
if 'META-INF/rights.xml' not in namelist or \
|
||||||
|
'META-INF/encryption.xml' not in namelist:
|
||||||
|
return False
|
||||||
|
try:
|
||||||
|
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
||||||
|
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
||||||
|
expr = './/%s' % (adept('encryptedKey'),)
|
||||||
|
bookkey = ''.join(rights.findtext(expr))
|
||||||
|
if len(bookkey) == 64:
|
||||||
|
return True
|
||||||
|
except:
|
||||||
|
# if we couldn't check, assume it is
|
||||||
|
return True
|
||||||
|
return False
|
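The 64-character test in ignobleBook is a cheap fingerprint: a Barnes &amp; Noble encryptedKey is the base64 of a 48-byte AES-encrypted blob, while the companion adeptBook check later in this commit expects 172 characters, the base64 of a 128-byte RSA-1024 blob. The arithmetic:

import base64
# base64 emits 4 * ceil(n / 3) characters for n input bytes
assert len(base64.b64encode(b'\x00' * 48)) == 64     # Barnes & Noble
assert len(base64.b64encode(b'\x00' * 128)) == 172   # Adobe Adept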
||||||
|
|
||||||
|
def decryptBook(keyb64, inpath, outpath):
|
||||||
|
if AES is None:
|
||||||
|
raise IGNOBLEError(u"PyCrypto or OpenSSL must be installed.")
|
||||||
|
key = keyb64.decode('base64')[:16]
|
||||||
|
aes = AES(key)
|
||||||
|
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
||||||
|
namelist = set(inf.namelist())
|
||||||
|
if 'META-INF/rights.xml' not in namelist or \
|
||||||
|
'META-INF/encryption.xml' not in namelist:
|
||||||
|
print u"{0:s} is DRM-free.".format(os.path.basename(inpath))
|
||||||
|
return 1
|
||||||
|
for name in META_NAMES:
|
||||||
|
namelist.remove(name)
|
||||||
|
try:
|
||||||
|
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
||||||
|
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
||||||
|
expr = './/%s' % (adept('encryptedKey'),)
|
||||||
|
bookkey = ''.join(rights.findtext(expr))
|
||||||
|
if len(bookkey) != 64:
|
||||||
|
print u"{0:s} is not a secure Barnes & Noble ePub.".format(os.path.basename(inpath))
|
||||||
|
return 1
|
||||||
|
bookkey = aes.decrypt(bookkey.decode('base64'))
|
||||||
|
bookkey = bookkey[:-ord(bookkey[-1])]
|
||||||
|
encryption = inf.read('META-INF/encryption.xml')
|
||||||
|
decryptor = Decryptor(bookkey[-16:], encryption)
|
||||||
|
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
||||||
|
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
||||||
|
zi = ZipInfo('mimetype')
|
||||||
|
zi.compress_type=ZIP_STORED
|
||||||
|
try:
|
||||||
|
# if the mimetype is present, get its info, including time-stamp
|
||||||
|
oldzi = inf.getinfo('mimetype')
|
||||||
|
# copy across fields to be preserved
|
||||||
|
zi.date_time = oldzi.date_time
|
||||||
|
zi.comment = oldzi.comment
|
||||||
|
zi.extra = oldzi.extra
|
||||||
|
zi.internal_attr = oldzi.internal_attr
|
||||||
|
# external attributes are dependent on the create system, so copy both.
|
||||||
|
zi.external_attr = oldzi.external_attr
|
||||||
|
zi.create_system = oldzi.create_system
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
outf.writestr(zi, inf.read('mimetype'))
|
||||||
|
for path in namelist:
|
||||||
|
data = inf.read(path)
|
||||||
|
zi = ZipInfo(path)
|
||||||
|
zi.compress_type=ZIP_DEFLATED
|
||||||
|
try:
|
||||||
|
# get the file info, including time-stamp
|
||||||
|
oldzi = inf.getinfo(path)
|
||||||
|
# copy across useful fields
|
||||||
|
zi.date_time = oldzi.date_time
|
||||||
|
zi.comment = oldzi.comment
|
||||||
|
zi.extra = oldzi.extra
|
||||||
|
zi.internal_attr = oldzi.internal_attr
|
||||||
|
# external attributes are dependent on the create system, so copy both.
|
||||||
|
zi.external_attr = oldzi.external_attr
|
||||||
|
zi.create_system = oldzi.create_system
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
outf.writestr(zi, decryptor.decrypt(path, data))
|
||||||
|
except:
|
||||||
|
print u"Could not decrypt {0:s} because of an exception:\n{1:s}".format(os.path.basename(inpath), traceback.format_exc())
|
||||||
|
return 2
|
||||||
|
return 0
|
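Two idioms in the new decryptBook are worth spelling out: the book key is AES-CBC-decrypted under an all-zero IV, matching the AES wrappers earlier in this hunk, and PKCS#7 padding is stripped by reading the final byte. A minimal sketch (PyCrypto assumed; names are ours):

from Crypto.Cipher import AES

def unpad_pkcs7(data):
    # the last byte says how many padding bytes to drop
    return data[:-ord(data[-1])]

def decrypt_book_key(userkey16, encrypted_bookkey):
    aes = AES.new(userkey16, AES.MODE_CBC, '\x00' * 16)   # zero IV
    return unpad_pkcs7(aes.decrypt(encrypted_bookkey))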
||||||
|
|
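The try/except blocks that copy ZipInfo fields are what revision entry 3.8 above ("retain zip file metadata") added. A compact helper expressing the same copy (name is ours):

def copy_zipinfo(oldzi, zi):
    # preserve time-stamp, comments and platform attributes across re-zipping
    zi.date_time = oldzi.date_time
    zi.comment = oldzi.comment
    zi.extra = oldzi.extra
    zi.internal_attr = oldzi.internal_attr
    # external attributes depend on the create system, so copy both
    zi.external_attr = oldzi.external_attr
    zi.create_system = oldzi.create_system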
||||||
|
|
||||||
def cli_main():
|
def cli_main():
|
||||||
|
@ -221,19 +323,15 @@ def cli_main():
|
||||||
sys.stderr=SafeUnbuffered(sys.stderr)
|
sys.stderr=SafeUnbuffered(sys.stderr)
|
||||||
argv=unicode_argv()
|
argv=unicode_argv()
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
if AES is None:
|
|
||||||
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
|
||||||
"separately. Read the top-of-script comment for details." % \
|
|
||||||
(progname,)
|
|
||||||
return 1
|
|
||||||
if len(argv) != 4:
|
if len(argv) != 4:
|
||||||
print u"usage: {0} <Name> <CC#> <keyfileout.b64>".format(progname)
|
print u"usage: {0} <keyfile.b64> <inbook.epub> <outbook.epub>".format(progname)
|
||||||
return 1
|
return 1
|
||||||
name, ccn, keypath = argv[1:]
|
keypath, inpath, outpath = argv[1:]
|
||||||
userkey = generate_key(name, ccn)
|
userkey = open(keypath,'rb').read()
|
||||||
open(keypath,'wb').write(userkey)
|
result = decryptBook(userkey, inpath, outpath)
|
||||||
return 0
|
if result == 0:
|
||||||
|
print u"Successfully decrypted {0:s} as {1:s}".format(os.path.basename(inpath),os.path.basename(outpath))
|
||||||
|
return result
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
try:
|
try:
|
||||||
|
@ -247,28 +345,33 @@ def gui_main():
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
Tkinter.Frame.__init__(self, root, border=5)
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
self.status = Tkinter.Label(self, text=u"Enter parameters")
|
self.status = Tkinter.Label(self, text=u"Select files for decryption")
|
||||||
self.status.pack(fill=Tkconstants.X, expand=1)
|
self.status.pack(fill=Tkconstants.X, expand=1)
|
||||||
body = Tkinter.Frame(self)
|
body = Tkinter.Frame(self)
|
||||||
body.pack(fill=Tkconstants.X, expand=1)
|
body.pack(fill=Tkconstants.X, expand=1)
|
||||||
sticky = Tkconstants.E + Tkconstants.W
|
sticky = Tkconstants.E + Tkconstants.W
|
||||||
body.grid_columnconfigure(1, weight=2)
|
body.grid_columnconfigure(1, weight=2)
|
||||||
Tkinter.Label(body, text=u"Account Name").grid(row=0)
|
Tkinter.Label(body, text=u"Key file").grid(row=0)
|
||||||
self.name = Tkinter.Entry(body, width=40)
|
self.keypath = Tkinter.Entry(body, width=30)
|
||||||
self.name.grid(row=0, column=1, sticky=sticky)
|
self.keypath.grid(row=0, column=1, sticky=sticky)
|
||||||
Tkinter.Label(body, text=u"CC#").grid(row=1)
|
if os.path.exists(u"bnepubkey.b64"):
|
||||||
self.ccn = Tkinter.Entry(body, width=40)
|
self.keypath.insert(0, u"bnepubkey.b64")
|
||||||
self.ccn.grid(row=1, column=1, sticky=sticky)
|
|
||||||
Tkinter.Label(body, text=u"Output file").grid(row=2)
|
|
||||||
self.keypath = Tkinter.Entry(body, width=40)
|
|
||||||
self.keypath.grid(row=2, column=1, sticky=sticky)
|
|
||||||
self.keypath.insert(2, u"bnepubkey.b64")
|
|
||||||
button = Tkinter.Button(body, text=u"...", command=self.get_keypath)
|
button = Tkinter.Button(body, text=u"...", command=self.get_keypath)
|
||||||
|
button.grid(row=0, column=2)
|
||||||
|
Tkinter.Label(body, text=u"Input file").grid(row=1)
|
||||||
|
self.inpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.inpath.grid(row=1, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text=u"...", command=self.get_inpath)
|
||||||
|
button.grid(row=1, column=2)
|
||||||
|
Tkinter.Label(body, text=u"Output file").grid(row=2)
|
||||||
|
self.outpath = Tkinter.Entry(body, width=30)
|
||||||
|
self.outpath.grid(row=2, column=1, sticky=sticky)
|
||||||
|
button = Tkinter.Button(body, text=u"...", command=self.get_outpath)
|
||||||
button.grid(row=2, column=2)
|
button.grid(row=2, column=2)
|
||||||
buttons = Tkinter.Frame(self)
|
buttons = Tkinter.Frame(self)
|
||||||
buttons.pack()
|
buttons.pack()
|
||||||
botton = Tkinter.Button(
|
botton = Tkinter.Button(
|
||||||
buttons, text=u"Generate", width=10, command=self.generate)
|
buttons, text=u"Decrypt", width=10, command=self.decrypt)
|
||||||
botton.pack(side=Tkconstants.LEFT)
|
botton.pack(side=Tkconstants.LEFT)
|
||||||
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
||||||
button = Tkinter.Button(
|
button = Tkinter.Button(
|
||||||
|
@ -276,8 +379,8 @@ def gui_main():
|
||||||
button.pack(side=Tkconstants.RIGHT)
|
button.pack(side=Tkconstants.RIGHT)
|
||||||
|
|
||||||
def get_keypath(self):
|
def get_keypath(self):
|
||||||
keypath = tkFileDialog.asksaveasfilename(
|
keypath = tkFileDialog.askopenfilename(
|
||||||
parent=None, title=u"Select B&N ePub key file to produce",
|
parent=None, title=u"Select Barnes & Noble \'.b64\' key file",
|
||||||
defaultextension=u".b64",
|
defaultextension=u".b64",
|
||||||
filetypes=[('base64-encoded files', '.b64'),
|
filetypes=[('base64-encoded files', '.b64'),
|
||||||
('All Files', '.*')])
|
('All Files', '.*')])
|
||||||
|
@ -287,37 +390,56 @@ def gui_main():
|
||||||
self.keypath.insert(0, keypath)
|
self.keypath.insert(0, keypath)
|
||||||
return
|
return
|
||||||
|
|
||||||
def generate(self):
|
def get_inpath(self):
|
||||||
name = self.name.get()
|
inpath = tkFileDialog.askopenfilename(
|
||||||
ccn = self.ccn.get()
|
parent=None, title=u"Select B&N-encrypted ePub file to decrypt",
|
||||||
|
defaultextension=u".epub", filetypes=[('ePub files', '.epub')])
|
||||||
|
if inpath:
|
||||||
|
inpath = os.path.normpath(inpath)
|
||||||
|
self.inpath.delete(0, Tkconstants.END)
|
||||||
|
self.inpath.insert(0, inpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def get_outpath(self):
|
||||||
|
outpath = tkFileDialog.asksaveasfilename(
|
||||||
|
parent=None, title=u"Select unencrypted ePub file to produce",
|
||||||
|
defaultextension=u".epub", filetypes=[('ePub files', '.epub')])
|
||||||
|
if outpath:
|
||||||
|
outpath = os.path.normpath(outpath)
|
||||||
|
self.outpath.delete(0, Tkconstants.END)
|
||||||
|
self.outpath.insert(0, outpath)
|
||||||
|
return
|
||||||
|
|
||||||
|
def decrypt(self):
|
||||||
keypath = self.keypath.get()
|
keypath = self.keypath.get()
|
||||||
if not name:
|
inpath = self.inpath.get()
|
||||||
self.status['text'] = u"Name not specified"
|
outpath = self.outpath.get()
|
||||||
|
if not keypath or not os.path.exists(keypath):
|
||||||
|
self.status['text'] = u"Specified key file does not exist"
|
||||||
return
|
return
|
||||||
if not ccn:
|
if not inpath or not os.path.exists(inpath):
|
||||||
self.status['text'] = u"Credit card number not specified"
|
self.status['text'] = u"Specified input file does not exist"
|
||||||
return
|
return
|
||||||
if not keypath:
|
if not outpath:
|
||||||
self.status['text'] = u"Output keyfile path not specified"
|
self.status['text'] = u"Output file not specified"
|
||||||
return
|
return
|
||||||
self.status['text'] = u"Generating..."
|
if inpath == outpath:
|
||||||
|
self.status['text'] = u"Must have different input and output files"
|
||||||
|
return
|
||||||
|
userkey = open(keypath,'rb').read()
|
||||||
|
self.status['text'] = u"Decrypting..."
|
||||||
try:
|
try:
|
||||||
userkey = generate_key(name, ccn)
|
decrypt_status = decryptBook(userkey, inpath, outpath)
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
self.status['text'] = u"Error: (0}".format(e.args[0])
|
self.status['text'] = u"Error: {0}".format(e.args[0])
|
||||||
return
|
return
|
||||||
open(keypath,'wb').write(userkey)
|
if decrypt_status == 0:
|
||||||
self.status['text'] = u"Keyfile successfully generated"
|
self.status['text'] = u"File successfully decrypted"
|
||||||
|
else:
|
||||||
|
self.status['text'] = u"The was an error decrypting the file."
|
||||||
|
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
if AES is None:
|
root.title(u"Barnes & Noble ePub Decrypter v.{0}".format(__version__))
|
||||||
root.withdraw()
|
|
||||||
tkMessageBox.showerror(
|
|
||||||
"Ignoble EPUB Keyfile Generator",
|
|
||||||
"This script requires OpenSSL or PyCrypto, which must be installed "
|
|
||||||
"separately. Read the top-of-script comment for details.")
|
|
||||||
return 1
|
|
||||||
root.title(u"Barnes & Noble ePub Keyfile Generator v.{0}".format(__version__))
|
|
||||||
root.resizable(True, False)
|
root.resizable(True, False)
|
||||||
root.minsize(300, 0)
|
root.minsize(300, 0)
|
||||||
DecryptionDialog(root).pack(fill=Tkconstants.X, expand=1)
|
DecryptionDialog(root).pack(fill=Tkconstants.X, expand=1)
|
||||||
|
|
|
@ -3,56 +3,47 @@
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
|
|
||||||
# ineptepub.pyw, version 6.1
|
# ignoblekeygen.pyw, version 2.5
|
||||||
# Copyright © 2009-2010 by i♥cabbages
|
# Copyright © 2009-2010 i♥cabbages
|
||||||
|
|
||||||
# Released under the terms of the GNU General Public Licence, version 3
|
# Released under the terms of the GNU General Public Licence, version 3
|
||||||
# <http://www.gnu.org/licenses/>
|
# <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
# Modified 2010–2013 by some_updates, DiapDealer and Apprentice Alf
|
# Modified 2010–2013 by some_updates, DiapDealer and Apprentice Alf
|
||||||
|
|
||||||
# Windows users: Before running this program, you must first install Python 2.6
|
# Windows users: Before running this program, you must first install Python.
|
||||||
# from <http://www.python.org/download/> and PyCrypto from
|
# We recommend ActiveState Python 2.7.X for Windows (x86) from
|
||||||
# <http://www.voidspace.org.uk/python/modules.shtml#pycrypto> (make sure to
|
# http://www.activestate.com/activepython/downloads.
|
||||||
# install the version for Python 2.6). Save this script file as
|
# You must also install PyCrypto from
|
||||||
# ineptepub.pyw and double-click on it to run it.
|
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
|
||||||
|
# (make certain to install the version for Python 2.7).
|
||||||
|
# Then save this script file as ignoblekeygen.pyw and double-click on it to run it.
|
||||||
#
|
#
|
||||||
# Mac OS X users: Save this script file as ineptepub.pyw. You can run this
|
# Mac OS X users: Save this script file as ignoblekeygen.pyw. You can run this
|
||||||
# program from the command line (pythonw ineptepub.pyw) or by double-clicking
|
# program from the command line (python ignoblekeygen.pyw) or by double-clicking
|
||||||
# it when it has been associated with PythonLauncher.
|
# it when it has been associated with PythonLauncher.
|
||||||
|
|
||||||
# Revision history:
|
# Revision history:
|
||||||
# 1 - Initial release
|
# 1 - Initial release
|
||||||
# 2 - Rename to INEPT, fix exit code
|
# 2 - Add OS X support by using OpenSSL when available (taken/modified from ineptepub v5)
|
||||||
# 5 - Version bump to avoid (?) confusion;
|
# 2.1 - Allow Windows versions of libcrypto to be found
|
||||||
# Improve OS X support by using OpenSSL when available
|
# 2.2 - On Windows try PyCrypto first and then OpenSSL next
|
||||||
# 5.1 - Improve OpenSSL error checking
|
# 2.3 - Modify interface to allow use of import
|
||||||
# 5.2 - Fix ctypes error causing segfaults on some systems
|
# 2.4 - Improvements to UI and now works in plugins
|
||||||
# 5.3 - add support for OpenSSL on Windows, fix bug with some versions of libcrypto 0.9.8 prior to path level o
|
# 2.5 - Additional improvement for unicode and plugin support
|
||||||
# 5.4 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
|
# 2.6 - moved unicode_argv call inside main for Windows DeDRM compatibility
|
||||||
# 5.5 - On Windows try PyCrypto first, OpenSSL next
|
# 2.7 - Work if TkInter is missing
|
||||||
# 5.6 - Modify interface to allow use with import
|
|
||||||
# 5.7 - Fix for potential problem with PyCrypto
|
|
||||||
# 5.8 - Revised to allow use in calibre plugins to eliminate need for duplicate code
|
|
||||||
# 5.9 - Fixed to retain zip file metadata (e.g. file modification date)
|
|
||||||
# 6.0 - moved unicode_argv call inside main for Windows DeDRM compatibility
|
|
||||||
# 6.1 - Work if TkInter is missing
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
Decrypt Adobe Digital Editions encrypted ePub books.
|
Generate Barnes & Noble EPUB user key from name and credit card number.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__license__ = 'GPL v3'
|
__license__ = 'GPL v3'
|
||||||
__version__ = "6.1"
|
__version__ = "2.7"
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import traceback
|
import hashlib
|
||||||
import zlib
|
|
||||||
import zipfile
|
|
||||||
from zipfile import ZipInfo, ZipFile, ZIP_STORED, ZIP_DEFLATED
|
|
||||||
from contextlib import closing
|
|
||||||
import xml.etree.ElementTree as etree
|
|
||||||
|
|
||||||
# Wrap a stream so that output gets flushed immediately
|
# Wrap a stream so that output gets flushed immediately
|
||||||
# and also make sure that any unicode strings get
|
# and also make sure that any unicode strings get
|
||||||
|
@ -84,8 +75,8 @@ def unicode_argv():
|
||||||
|
|
||||||
# Versions 2.x of Python don't support Unicode in sys.argv on
|
# Versions 2.x of Python don't support Unicode in sys.argv on
|
||||||
# Windows, with the underlying Windows API instead replacing multi-byte
|
# Windows, with the underlying Windows API instead replacing multi-byte
|
||||||
# characters with '?'.
|
# characters with '?'. So use shell32.GetCommandLineArgvW to get sys.argv
|
||||||
|
# as a list of Unicode strings and encode them as utf-8
|
||||||
|
|
||||||
from ctypes import POINTER, byref, cdll, c_int, windll
|
from ctypes import POINTER, byref, cdll, c_int, windll
|
||||||
from ctypes.wintypes import LPCWSTR, LPWSTR
|
from ctypes.wintypes import LPCWSTR, LPWSTR
|
||||||
|
@ -106,7 +97,9 @@ def unicode_argv():
|
||||||
start = argc.value - len(sys.argv)
|
start = argc.value - len(sys.argv)
|
||||||
return [argv[i] for i in
|
return [argv[i] for i in
|
||||||
xrange(start, argc.value)]
|
xrange(start, argc.value)]
|
||||||
return [u"ineptepub.py"]
|
# if we don't have any arguments at all, just pass back script name
|
||||||
|
# this should never happen
|
||||||
|
return [u"ignoblekeygen.py"]
|
||||||
else:
|
else:
|
||||||
argvencoding = sys.stdin.encoding
|
argvencoding = sys.stdin.encoding
|
||||||
if argvencoding == None:
|
if argvencoding == None:
|
||||||
|
@ -114,7 +107,7 @@ def unicode_argv():
|
||||||
return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]
|
return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]
|
||||||
|
|
||||||
|
|
||||||
class ADEPTError(Exception):
|
class IGNOBLEError(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def _load_crypto_libcrypto():
|
def _load_crypto_libcrypto():
|
||||||
|
@ -128,19 +121,14 @@ def _load_crypto_libcrypto():
|
||||||
libcrypto = find_library('crypto')
|
libcrypto = find_library('crypto')
|
||||||
|
|
||||||
if libcrypto is None:
|
if libcrypto is None:
|
||||||
raise ADEPTError('libcrypto not found')
|
raise IGNOBLEError('libcrypto not found')
|
||||||
libcrypto = CDLL(libcrypto)
|
libcrypto = CDLL(libcrypto)
|
||||||
|
|
||||||
RSA_NO_PADDING = 3
|
|
||||||
AES_MAXNR = 14
|
AES_MAXNR = 14
|
||||||
|
|
||||||
c_char_pp = POINTER(c_char_p)
|
c_char_pp = POINTER(c_char_p)
|
||||||
c_int_p = POINTER(c_int)
|
c_int_p = POINTER(c_int)
|
||||||
|
|
||||||
class RSA(Structure):
|
|
||||||
pass
|
|
||||||
RSA_p = POINTER(RSA)
|
|
||||||
|
|
||||||
class AES_KEY(Structure):
|
class AES_KEY(Structure):
|
||||||
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
|
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
|
||||||
('rounds', c_int)]
|
('rounds', c_int)]
|
||||||
|
@ -152,312 +140,80 @@ def _load_crypto_libcrypto():
|
||||||
func.argtypes = argtypes
|
func.argtypes = argtypes
|
||||||
return func
|
return func
|
||||||
|
|
||||||
d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey',
|
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
|
||||||
[RSA_p, c_char_pp, c_long])
|
|
||||||
RSA_size = F(c_int, 'RSA_size', [RSA_p])
|
|
||||||
RSA_private_decrypt = F(c_int, 'RSA_private_decrypt',
|
|
||||||
[c_int, c_char_p, c_char_p, RSA_p, c_int])
|
|
||||||
RSA_free = F(None, 'RSA_free', [RSA_p])
|
|
||||||
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
|
|
||||||
[c_char_p, c_int, AES_KEY_p])
|
[c_char_p, c_int, AES_KEY_p])
|
||||||
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
|
||||||
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
|
||||||
c_int])
|
c_int])
|
||||||
|
|
||||||
class RSA(object):
|
|
||||||
def __init__(self, der):
|
|
||||||
buf = create_string_buffer(der)
|
|
||||||
pp = c_char_pp(cast(buf, c_char_p))
|
|
||||||
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
|
|
||||||
if rsa is None:
|
|
||||||
raise ADEPTError('Error parsing ADEPT user key DER')
|
|
||||||
|
|
||||||
def decrypt(self, from_):
|
|
||||||
rsa = self._rsa
|
|
||||||
to = create_string_buffer(RSA_size(rsa))
|
|
||||||
dlen = RSA_private_decrypt(len(from_), from_, to, rsa,
|
|
||||||
RSA_NO_PADDING)
|
|
||||||
if dlen < 0:
|
|
||||||
raise ADEPTError('RSA decryption failed')
|
|
||||||
return to[:dlen]
|
|
||||||
|
|
||||||
def __del__(self):
|
|
||||||
if self._rsa is not None:
|
|
||||||
RSA_free(self._rsa)
|
|
||||||
self._rsa = None
|
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
def __init__(self, userkey):
|
def __init__(self, userkey, iv):
|
||||||
self._blocksize = len(userkey)
|
self._blocksize = len(userkey)
|
||||||
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
|
self._iv = iv
|
||||||
raise ADEPTError('AES improper key used')
|
|
||||||
return
|
|
||||||
key = self._key = AES_KEY()
|
key = self._key = AES_KEY()
|
||||||
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
|
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
|
||||||
if rv < 0:
|
if rv < 0:
|
||||||
raise ADEPTError('Failed to initialize AES key')
|
raise IGNOBLEError('Failed to initialize AES Encrypt key')
|
||||||
|
|
||||||
def decrypt(self, data):
|
def encrypt(self, data):
|
||||||
out = create_string_buffer(len(data))
|
out = create_string_buffer(len(data))
|
||||||
iv = ("\x00" * self._blocksize)
|
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
|
||||||
rv = AES_cbc_encrypt(data, out, len(data), self._key, iv, 0)
|
|
||||||
if rv == 0:
|
if rv == 0:
|
||||||
raise ADEPTError('AES decryption failed')
|
raise IGNOBLEError('AES encryption failed')
|
||||||
return out.raw
|
return out.raw
|
||||||
|
|
||||||
return (AES, RSA)
|
return AES
|
||||||
|
|
||||||
def _load_crypto_pycrypto():
|
def _load_crypto_pycrypto():
|
||||||
from Crypto.Cipher import AES as _AES
|
from Crypto.Cipher import AES as _AES
|
||||||
from Crypto.PublicKey import RSA as _RSA
|
|
||||||
|
|
||||||
# ASN.1 parsing code from tlslite
|
|
||||||
class ASN1Error(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
class ASN1Parser(object):
|
|
||||||
class Parser(object):
|
|
||||||
def __init__(self, bytes):
|
|
||||||
self.bytes = bytes
|
|
||||||
self.index = 0
|
|
||||||
|
|
||||||
def get(self, length):
|
|
||||||
if self.index + length > len(self.bytes):
|
|
||||||
raise ASN1Error("Error decoding ASN.1")
|
|
||||||
x = 0
|
|
||||||
for count in range(length):
|
|
||||||
x <<= 8
|
|
||||||
x |= self.bytes[self.index]
|
|
||||||
self.index += 1
|
|
||||||
return x
|
|
||||||
|
|
||||||
def getFixBytes(self, lengthBytes):
|
|
||||||
bytes = self.bytes[self.index : self.index+lengthBytes]
|
|
||||||
self.index += lengthBytes
|
|
||||||
return bytes
|
|
||||||
|
|
||||||
def getVarBytes(self, lengthLength):
|
|
||||||
lengthBytes = self.get(lengthLength)
|
|
||||||
return self.getFixBytes(lengthBytes)
|
|
||||||
|
|
||||||
def getFixList(self, length, lengthList):
|
|
||||||
l = [0] * lengthList
|
|
||||||
for x in range(lengthList):
|
|
||||||
l[x] = self.get(length)
|
|
||||||
return l
|
|
||||||
|
|
||||||
def getVarList(self, length, lengthLength):
|
|
||||||
lengthList = self.get(lengthLength)
|
|
||||||
if lengthList % length != 0:
|
|
||||||
raise ASN1Error("Error decoding ASN.1")
|
|
||||||
lengthList = int(lengthList/length)
|
|
||||||
l = [0] * lengthList
|
|
||||||
for x in range(lengthList):
|
|
||||||
l[x] = self.get(length)
|
|
||||||
return l
|
|
||||||
|
|
||||||
def startLengthCheck(self, lengthLength):
|
|
||||||
self.lengthCheck = self.get(lengthLength)
|
|
||||||
self.indexCheck = self.index
|
|
||||||
|
|
||||||
def setLengthCheck(self, length):
|
|
||||||
self.lengthCheck = length
|
|
||||||
self.indexCheck = self.index
|
|
||||||
|
|
||||||
def stopLengthCheck(self):
|
|
||||||
if (self.index - self.indexCheck) != self.lengthCheck:
|
|
||||||
raise ASN1Error("Error decoding ASN.1")
|
|
||||||
|
|
||||||
def atLengthCheck(self):
|
|
||||||
if (self.index - self.indexCheck) < self.lengthCheck:
|
|
||||||
return False
|
|
||||||
elif (self.index - self.indexCheck) == self.lengthCheck:
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
raise ASN1Error("Error decoding ASN.1")
|
|
||||||
|
|
||||||
def __init__(self, bytes):
|
|
||||||
p = self.Parser(bytes)
|
|
||||||
p.get(1)
|
|
||||||
self.length = self._getASN1Length(p)
|
|
||||||
self.value = p.getFixBytes(self.length)
|
|
||||||
|
|
||||||
def getChild(self, which):
|
|
||||||
p = self.Parser(self.value)
|
|
||||||
for x in range(which+1):
|
|
||||||
markIndex = p.index
|
|
||||||
p.get(1)
|
|
||||||
length = self._getASN1Length(p)
|
|
||||||
p.getFixBytes(length)
|
|
||||||
return ASN1Parser(p.bytes[markIndex:p.index])
|
|
||||||
|
|
||||||
def _getASN1Length(self, p):
|
|
||||||
firstLength = p.get(1)
|
|
||||||
if firstLength<=127:
|
|
||||||
return firstLength
|
|
||||||
else:
|
|
||||||
lengthLength = firstLength & 0x7F
|
|
||||||
return p.get(lengthLength)
|
|
||||||
|
|
||||||
class AES(object):
|
class AES(object):
|
||||||
def __init__(self, key):
|
def __init__(self, key, iv):
|
||||||
self._aes = _AES.new(key, _AES.MODE_CBC, '\x00'*16)
|
self._aes = _AES.new(key, _AES.MODE_CBC, iv)
|
||||||
|
|
||||||
def decrypt(self, data):
|
def encrypt(self, data):
|
||||||
return self._aes.decrypt(data)
|
return self._aes.encrypt(data)
|
||||||
|
|
||||||
class RSA(object):
|
return AES
|
||||||
def __init__(self, der):
|
|
||||||
key = ASN1Parser([ord(x) for x in der])
|
|
||||||
key = [key.getChild(x).value for x in xrange(1, 4)]
|
|
||||||
key = [self.bytesToNumber(v) for v in key]
|
|
||||||
self._rsa = _RSA.construct(key)
|
|
||||||
|
|
||||||
def bytesToNumber(self, bytes):
|
|
||||||
total = 0L
|
|
||||||
for byte in bytes:
|
|
||||||
total = (total << 8) + byte
|
|
||||||
return total
|
|
||||||
|
|
||||||
def decrypt(self, data):
|
|
||||||
return self._rsa.decrypt(data)
|
|
||||||
|
|
||||||
return (AES, RSA)
|
|
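The ASN.1 walk being removed on the left extracts (n, e, d) from a PKCS#1 RSAPrivateKey, which is SEQUENCE { version, n, e, d, p, q, dP, dQ, qInv }; children 1 through 3 are exactly what RSA.construct needs for decryption. Its bytesToNumber is a plain big-endian fold:

def bytes_to_number(byte_values):
    # same big-endian accumulation as bytesToNumber() above
    total = 0L
    for b in byte_values:
        total = (total << 8) + b
    return total

assert bytes_to_number([0x01, 0x00]) == 256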
||||||
|
|
||||||
def _load_crypto():
|
def _load_crypto():
|
||||||
AES = RSA = None
|
AES = None
|
||||||
cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
|
cryptolist = (_load_crypto_libcrypto, _load_crypto_pycrypto)
|
||||||
if sys.platform.startswith('win'):
|
if sys.platform.startswith('win'):
|
||||||
cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
|
cryptolist = (_load_crypto_pycrypto, _load_crypto_libcrypto)
|
||||||
for loader in cryptolist:
|
for loader in cryptolist:
|
||||||
try:
|
try:
|
||||||
AES, RSA = loader()
|
AES = loader()
|
||||||
break
|
break
|
||||||
except (ImportError, ADEPTError):
|
except (ImportError, IGNOBLEError):
|
||||||
pass
|
pass
|
||||||
return (AES, RSA)
|
return AES
|
||||||
|
|
||||||
AES, RSA = _load_crypto()
|
AES = _load_crypto()
|
||||||
|
|
||||||
META_NAMES = ('mimetype', 'META-INF/rights.xml', 'META-INF/encryption.xml')
|
def normalize_name(name):
|
||||||
NSMAP = {'adept': 'http://ns.adobe.com/adept',
|
return ''.join(x for x in name.lower() if x != ' ')
|
||||||
'enc': 'http://www.w3.org/2001/04/xmlenc#'}
|
|
||||||
|
|
||||||
class Decryptor(object):
|
|
||||||
def __init__(self, bookkey, encryption):
|
|
||||||
enc = lambda tag: '{%s}%s' % (NSMAP['enc'], tag)
|
|
||||||
self._aes = AES(bookkey)
|
|
||||||
encryption = etree.fromstring(encryption)
|
|
||||||
self._encrypted = encrypted = set()
|
|
||||||
expr = './%s/%s/%s' % (enc('EncryptedData'), enc('CipherData'),
|
|
||||||
enc('CipherReference'))
|
|
||||||
for elem in encryption.findall(expr):
|
|
||||||
path = elem.get('URI', None)
|
|
||||||
if path is not None:
|
|
||||||
path = path.encode('utf-8')
|
|
||||||
encrypted.add(path)
|
|
||||||
|
|
||||||
def decompress(self, bytes):
|
def generate_key(name, ccn):
|
||||||
dc = zlib.decompressobj(-15)
|
# remove spaces and case from name and CC numbers.
|
||||||
bytes = dc.decompress(bytes)
|
if type(name)==unicode:
|
||||||
ex = dc.decompress('Z') + dc.flush()
|
name = name.encode('utf-8')
|
||||||
if ex:
|
if type(ccn)==unicode:
|
||||||
bytes = bytes + ex
|
ccn = ccn.encode('utf-8')
|
||||||
return bytes
|
|
||||||
|
|
||||||
def decrypt(self, path, data):
|
name = normalize_name(name) + '\x00'
|
||||||
if path in self._encrypted:
|
ccn = normalize_name(ccn) + '\x00'
|
||||||
data = self._aes.decrypt(data)[16:]
|
|
||||||
data = data[:-ord(data[-1])]
|
name_sha = hashlib.sha1(name).digest()[:16]
|
||||||
data = self.decompress(data)
|
ccn_sha = hashlib.sha1(ccn).digest()[:16]
|
||||||
return data
|
both_sha = hashlib.sha1(name + ccn).digest()
|
||||||
|
aes = AES(ccn_sha, name_sha)
|
||||||
|
crypt = aes.encrypt(both_sha + ('\x0c' * 0x0c))
|
||||||
|
userkey = hashlib.sha1(crypt).digest()
|
||||||
|
return userkey.encode('base64')
|
||||||
|
|
||||||
# check file to see whether it's probably an Adobe Adept encrypted ePub
|
|
||||||
def adeptBook(inpath):
|
|
||||||
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
|
||||||
namelist = set(inf.namelist())
|
|
||||||
if 'META-INF/rights.xml' not in namelist or \
|
|
||||||
'META-INF/encryption.xml' not in namelist:
|
|
||||||
return False
|
|
||||||
try:
|
|
||||||
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
|
||||||
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
|
||||||
expr = './/%s' % (adept('encryptedKey'),)
|
|
||||||
bookkey = ''.join(rights.findtext(expr))
|
|
||||||
if len(bookkey) == 172:
|
|
||||||
return True
|
|
||||||
except:
|
|
||||||
# if we couldn't check, assume it is
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def decryptBook(userkey, inpath, outpath):
|
|
||||||
if AES is None:
|
|
||||||
raise ADEPTError(u"PyCrypto or OpenSSL must be installed.")
|
|
||||||
rsa = RSA(userkey)
|
|
||||||
with closing(ZipFile(open(inpath, 'rb'))) as inf:
|
|
||||||
namelist = set(inf.namelist())
|
|
||||||
if 'META-INF/rights.xml' not in namelist or \
|
|
||||||
'META-INF/encryption.xml' not in namelist:
|
|
||||||
print u"{0:s} is DRM-free.".format(os.path.basename(inpath))
|
|
||||||
return 1
|
|
||||||
for name in META_NAMES:
|
|
||||||
namelist.remove(name)
|
|
||||||
try:
|
|
||||||
rights = etree.fromstring(inf.read('META-INF/rights.xml'))
|
|
||||||
adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
|
|
||||||
expr = './/%s' % (adept('encryptedKey'),)
|
|
||||||
bookkey = ''.join(rights.findtext(expr))
|
|
||||||
if len(bookkey) != 172:
|
|
||||||
print u"{0:s} is not a secure Adobe Adept ePub.".format(os.path.basename(inpath))
|
|
||||||
return 1
|
|
||||||
bookkey = rsa.decrypt(bookkey.decode('base64'))
|
|
||||||
# Padded as per RSAES-PKCS1-v1_5
|
|
||||||
if bookkey[-17] != '\x00':
|
|
||||||
print u"Could not decrypt {0:s}. Wrong key".format(os.path.basename(inpath))
|
|
||||||
return 2
|
|
||||||
encryption = inf.read('META-INF/encryption.xml')
|
|
||||||
decryptor = Decryptor(bookkey[-16:], encryption)
|
|
||||||
kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
|
|
||||||
with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
|
|
||||||
zi = ZipInfo('mimetype')
|
|
||||||
zi.compress_type=ZIP_STORED
|
|
||||||
try:
|
|
||||||
# if the mimetype is present, get its info, including time-stamp
|
|
||||||
oldzi = inf.getinfo('mimetype')
|
|
||||||
# copy across fields to be preserved
|
|
||||||
zi.date_time = oldzi.date_time
|
|
||||||
zi.comment = oldzi.comment
|
|
||||||
zi.extra = oldzi.extra
|
|
||||||
zi.internal_attr = oldzi.internal_attr
|
|
||||||
# external attributes are dependent on the create system, so copy both.
|
|
||||||
zi.external_attr = oldzi.external_attr
|
|
||||||
zi.create_system = oldzi.create_system
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
outf.writestr(zi, inf.read('mimetype'))
|
|
||||||
for path in namelist:
|
|
||||||
data = inf.read(path)
|
|
||||||
zi = ZipInfo(path)
|
|
||||||
zi.compress_type=ZIP_DEFLATED
|
|
||||||
try:
|
|
||||||
# get the file info, including time-stamp
|
|
||||||
oldzi = inf.getinfo(path)
|
|
||||||
# copy across useful fields
|
|
||||||
zi.date_time = oldzi.date_time
|
|
||||||
zi.comment = oldzi.comment
|
|
||||||
zi.extra = oldzi.extra
|
|
||||||
zi.internal_attr = oldzi.internal_attr
|
|
||||||
# external attributes are dependent on the create system, so copy both.
|
|
||||||
zi.external_attr = oldzi.external_attr
|
|
||||||
zi.create_system = oldzi.create_system
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
outf.writestr(zi, decryptor.decrypt(path, data))
|
|
||||||
except:
|
|
||||||
print u"Could not decrypt {0:s} because of an exception:\n{1:s}".format(os.path.basename(inpath), traceback.format_exc())
|
|
||||||
return 2
|
|
||||||
return 0
|
|
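The bookkey[-17] != '\x00' test on the left is a padding sanity check: RSAES-PKCS1-v1_5 plaintext is laid out as 0x00 0x02 &lt;nonzero padding&gt; 0x00 &lt;message&gt;, so with a 16-byte book key the separator byte must sit 17 bytes from the end. As a sketch (name is ours):

def plausible_adept_bookkey(decrypted, keylen=16):
    # 0x00 0x02 <PS> 0x00 <16-byte book key>: check the separator NUL
    return decrypted[-(keylen + 1)] == '\x00'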
||||||
|
|
||||||
|
|
||||||
def cli_main():
|
def cli_main():
|
||||||
|
@ -465,15 +221,19 @@ def cli_main():
|
||||||
sys.stderr=SafeUnbuffered(sys.stderr)
|
sys.stderr=SafeUnbuffered(sys.stderr)
|
||||||
argv=unicode_argv()
|
argv=unicode_argv()
|
||||||
progname = os.path.basename(argv[0])
|
progname = os.path.basename(argv[0])
|
||||||
if len(argv) != 4:
|
if AES is None:
|
||||||
print u"usage: {0} <keyfile.der> <inbook.epub> <outbook.epub>".format(progname)
|
print "%s: This script requires OpenSSL or PyCrypto, which must be installed " \
|
||||||
|
"separately. Read the top-of-script comment for details." % \
|
||||||
|
(progname,)
|
||||||
return 1
|
return 1
|
||||||
keypath, inpath, outpath = argv[1:]
|
if len(argv) != 4:
|
||||||
userkey = open(keypath,'rb').read()
|
print u"usage: {0} <Name> <CC#> <keyfileout.b64>".format(progname)
|
||||||
result = decryptBook(userkey, inpath, outpath)
|
return 1
|
||||||
if result == 0:
|
name, ccn, keypath = argv[1:]
|
||||||
print u"Successfully decrypted {0:s} as {1:s}".format(os.path.basename(inpath),os.path.basename(outpath))
|
userkey = generate_key(name, ccn)
|
||||||
return result
|
open(keypath,'wb').write(userkey)
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def gui_main():
|
def gui_main():
|
||||||
try:
|
try:
|
||||||
|
@ -487,33 +247,28 @@ def gui_main():
|
||||||
class DecryptionDialog(Tkinter.Frame):
|
class DecryptionDialog(Tkinter.Frame):
|
||||||
def __init__(self, root):
|
def __init__(self, root):
|
||||||
Tkinter.Frame.__init__(self, root, border=5)
|
Tkinter.Frame.__init__(self, root, border=5)
|
||||||
self.status = Tkinter.Label(self, text=u"Select files for decryption")
|
self.status = Tkinter.Label(self, text=u"Enter parameters")
|
||||||
self.status.pack(fill=Tkconstants.X, expand=1)
|
self.status.pack(fill=Tkconstants.X, expand=1)
|
||||||
body = Tkinter.Frame(self)
|
body = Tkinter.Frame(self)
|
||||||
body.pack(fill=Tkconstants.X, expand=1)
|
body.pack(fill=Tkconstants.X, expand=1)
|
||||||
sticky = Tkconstants.E + Tkconstants.W
|
sticky = Tkconstants.E + Tkconstants.W
|
||||||
body.grid_columnconfigure(1, weight=2)
|
body.grid_columnconfigure(1, weight=2)
|
||||||
Tkinter.Label(body, text=u"Key file").grid(row=0)
|
Tkinter.Label(body, text=u"Account Name").grid(row=0)
|
||||||
self.keypath = Tkinter.Entry(body, width=30)
|
self.name = Tkinter.Entry(body, width=40)
|
||||||
self.keypath.grid(row=0, column=1, sticky=sticky)
|
self.name.grid(row=0, column=1, sticky=sticky)
|
||||||
if os.path.exists(u"adeptkey.der"):
|
Tkinter.Label(body, text=u"CC#").grid(row=1)
|
||||||
self.keypath.insert(0, u"adeptkey.der")
|
self.ccn = Tkinter.Entry(body, width=40)
|
||||||
button = Tkinter.Button(body, text=u"...", command=self.get_keypath)
|
self.ccn.grid(row=1, column=1, sticky=sticky)
|
||||||
button.grid(row=0, column=2)
|
|
||||||
Tkinter.Label(body, text=u"Input file").grid(row=1)
|
|
||||||
self.inpath = Tkinter.Entry(body, width=30)
|
|
||||||
self.inpath.grid(row=1, column=1, sticky=sticky)
|
|
||||||
button = Tkinter.Button(body, text=u"...", command=self.get_inpath)
|
|
||||||
button.grid(row=1, column=2)
|
|
||||||
Tkinter.Label(body, text=u"Output file").grid(row=2)
|
Tkinter.Label(body, text=u"Output file").grid(row=2)
|
||||||
self.outpath = Tkinter.Entry(body, width=30)
|
self.keypath = Tkinter.Entry(body, width=40)
|
||||||
self.outpath.grid(row=2, column=1, sticky=sticky)
|
self.keypath.grid(row=2, column=1, sticky=sticky)
|
||||||
button = Tkinter.Button(body, text=u"...", command=self.get_outpath)
|
self.keypath.insert(2, u"bnepubkey.b64")
|
||||||
|
button = Tkinter.Button(body, text=u"...", command=self.get_keypath)
|
||||||
button.grid(row=2, column=2)
|
button.grid(row=2, column=2)
|
||||||
buttons = Tkinter.Frame(self)
|
buttons = Tkinter.Frame(self)
|
||||||
buttons.pack()
|
buttons.pack()
|
||||||
botton = Tkinter.Button(
|
botton = Tkinter.Button(
|
||||||
buttons, text=u"Decrypt", width=10, command=self.decrypt)
|
buttons, text=u"Generate", width=10, command=self.generate)
|
||||||
botton.pack(side=Tkconstants.LEFT)
|
botton.pack(side=Tkconstants.LEFT)
|
||||||
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
Tkinter.Frame(buttons, width=10).pack(side=Tkconstants.LEFT)
|
||||||
button = Tkinter.Button(
|
button = Tkinter.Button(
|
||||||
|
@ -521,10 +276,10 @@ def gui_main():
|
||||||
button.pack(side=Tkconstants.RIGHT)
|
button.pack(side=Tkconstants.RIGHT)
|
||||||
|
|
||||||
def get_keypath(self):
|
def get_keypath(self):
|
||||||
keypath = tkFileDialog.askopenfilename(
|
keypath = tkFileDialog.asksaveasfilename(
|
||||||
parent=None, title=u"Select Adobe Adept \'.der\' key file",
|
parent=None, title=u"Select B&N ePub key file to produce",
|
||||||
defaultextension=u".der",
|
defaultextension=u".b64",
|
||||||
filetypes=[('Adobe Adept DER-encoded files', '.der'),
|
filetypes=[('base64-encoded files', '.b64'),
|
||||||
('All Files', '.*')])
|
('All Files', '.*')])
|
||||||
if keypath:
|
if keypath:
|
||||||
keypath = os.path.normpath(keypath)
|
keypath = os.path.normpath(keypath)
|
||||||
|
@ -532,56 +287,37 @@ def gui_main():
|
||||||
self.keypath.insert(0, keypath)
|
self.keypath.insert(0, keypath)
|
||||||
return
|
return
|
||||||
|
|
||||||
def get_inpath(self):
|
def generate(self):
|
||||||
inpath = tkFileDialog.askopenfilename(
|
name = self.name.get()
|
||||||
parent=None, title=u"Select ADEPT-encrypted ePub file to decrypt",
|
ccn = self.ccn.get()
|
||||||
defaultextension=u".epub", filetypes=[('ePub files', '.epub')])
|
|
||||||
if inpath:
|
|
||||||
inpath = os.path.normpath(inpath)
|
|
||||||
self.inpath.delete(0, Tkconstants.END)
|
|
||||||
self.inpath.insert(0, inpath)
|
|
||||||
return
|
|
||||||
|
|
||||||
def get_outpath(self):
|
|
||||||
outpath = tkFileDialog.asksaveasfilename(
|
|
||||||
parent=None, title=u"Select unencrypted ePub file to produce",
|
|
||||||
defaultextension=u".epub", filetypes=[('ePub files', '.epub')])
|
|
||||||
if outpath:
|
|
||||||
outpath = os.path.normpath(outpath)
|
|
||||||
self.outpath.delete(0, Tkconstants.END)
|
|
||||||
self.outpath.insert(0, outpath)
|
|
||||||
return
|
|
||||||
|
|
||||||
def decrypt(self):
|
|
||||||
keypath = self.keypath.get()
|
keypath = self.keypath.get()
|
||||||
inpath = self.inpath.get()
|
if not name:
|
||||||
outpath = self.outpath.get()
|
self.status['text'] = u"Name not specified"
|
||||||
if not keypath or not os.path.exists(keypath):
|
|
||||||
self.status['text'] = u"Specified key file does not exist"
|
|
||||||
return
|
return
|
||||||
if not inpath or not os.path.exists(inpath):
|
if not ccn:
|
||||||
self.status['text'] = u"Specified input file does not exist"
|
self.status['text'] = u"Credit card number not specified"
|
||||||
return
|
return
|
||||||
if not outpath:
|
if not keypath:
|
||||||
self.status['text'] = u"Output file not specified"
|
self.status['text'] = u"Output keyfile path not specified"
|
||||||
return
|
return
|
||||||
if inpath == outpath:
|
self.status['text'] = u"Generating..."
|
||||||
self.status['text'] = u"Must have different input and output files"
|
|
||||||
return
|
|
||||||
userkey = open(keypath,'rb').read()
|
|
||||||
self.status['text'] = u"Decrypting..."
|
|
||||||
try:
|
try:
|
||||||
decrypt_status = decryptBook(userkey, inpath, outpath)
|
userkey = generate_key(name, ccn)
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
self.status['text'] = u"Error: {0}".format(e.args[0])
|
self.status['text'] = u"Error: (0}".format(e.args[0])
|
||||||
return
|
return
|
||||||
if decrypt_status == 0:
|
open(keypath,'wb').write(userkey)
|
||||||
self.status['text'] = u"File successfully decrypted"
|
self.status['text'] = u"Keyfile successfully generated"
|
||||||
else:
|
|
||||||
self.status['text'] = u"The was an error decrypting the file."
|
|
||||||
|
|
||||||
root = Tkinter.Tk()
|
root = Tkinter.Tk()
|
||||||
root.title(u"Adobe Adept ePub Decrypter v.{0}".format(__version__))
|
if AES is None:
|
||||||
|
root.withdraw()
|
||||||
|
tkMessageBox.showerror(
|
||||||
|
"Ignoble EPUB Keyfile Generator",
|
||||||
|
"This script requires OpenSSL or PyCrypto, which must be installed "
|
||||||
|
"separately. Read the top-of-script comment for details.")
|
||||||
|
return 1
|
||||||
|
root.title(u"Barnes & Noble ePub Keyfile Generator v.{0}".format(__version__))
|
||||||
root.resizable(True, False)
|
root.resizable(True, False)
|
||||||
root.minsize(300, 0)
|
root.minsize(300, 0)
|
||||||
DecryptionDialog(root).pack(fill=Tkconstants.X, expand=1)
|
DecryptionDialog(root).pack(fill=Tkconstants.X, expand=1)
|
||||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -2,266 +2,331 @@
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from __future__ import with_statement
|
from __future__ import with_statement
|
||||||
import sys
|
|
||||||
import os, csv
|
# ignobleepub.pyw, version 3.6
|
||||||
import binascii
|
# Copyright © 2009-2012 by DiapDealer et al.
|
||||||
import zlib
|
|
||||||
|
# engine to remove drm from Kindle for Mac and Kindle for PC books
|
||||||
|
# for personal use for archiving and converting your ebooks
|
||||||
|
|
||||||
|
# PLEASE DO NOT PIRATE EBOOKS!
|
||||||
|
|
||||||
|
# We want all authors and publishers, and eBook stores to live
|
||||||
|
# long and prosperous lives but at the same time we just want to
|
||||||
|
# be able to read OUR books on whatever device we want and to keep
|
||||||
|
# readable for a long, long time
|
||||||
|
|
||||||
|
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
|
||||||
|
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
|
||||||
|
# and many many others
|
||||||
|
# Special thanks to The Dark Reverser for MobiDeDrm and CMBDTC for cmbdtc_dump
|
||||||
|
# from which this script borrows most unashamedly.
|
||||||
|
|
||||||
|
|
||||||
|
# Changelog
|
||||||
|
# 1.0 - Name change to k4mobidedrm. Adds Mac support, Adds plugin code
|
||||||
|
# 1.1 - Adds support for additional kindle.info files
|
||||||
|
# 1.2 - Better error handling for older Mobipocket
|
||||||
|
# 1.3 - Don't try to decrypt Topaz books
|
||||||
|
# 1.7 - Add support for Topaz books and Kindle serial numbers. Split code.
|
||||||
|
# 1.9 - Tidy up after Topaz, minor exception changes
|
||||||
|
# 2.1 - Topaz fix and filename sanitizing
|
||||||
|
# 2.2 - Topaz Fix and minor Mac code fix
|
||||||
|
# 2.3 - More Topaz fixes
|
||||||
|
# 2.4 - K4PC/Mac key generation fix
|
||||||
|
# 2.6 - Better handling of non-K4PC/Mac ebooks
|
||||||
|
# 2.7 - Better trailing bytes handling in mobidedrm
|
||||||
|
# 2.8 - Moved parsing of kindle.info files to mac & pc util files.
|
||||||
|
# 3.1 - Updated for new calibre interface. Now __init__ in plugin.
|
||||||
|
# 3.5 - Now support Kindle for PC/Mac 1.6
|
||||||
|
# 3.6 - Even better trailing bytes handling in mobidedrm
|
||||||
|
# 3.7 - Add support for Amazon Print Replica ebooks.
|
||||||
|
# 3.8 - Improved Topaz support
|
||||||
|
# 4.1 - Improved Topaz support and faster decryption with alfcrypto
|
||||||
|
# 4.2 - Added support for Amazon's KF8 format ebooks
|
||||||
|
# 4.4 - Linux calls to Wine added, and improved configuration dialog
|
||||||
|
# 4.5 - Linux works again without Wine. Some Mac key file search changes
|
||||||
|
# 4.6 - First attempt to handle unicode properly
|
||||||
|
# 4.7 - Added timing reports, and changed search for Mac key files
|
||||||
|
# 4.8 - Much better unicode handling, matching the updated inept and ignoble scripts
|
||||||
|
# - Moved back into plugin, __init__ in plugin now only contains plugin code.
|
||||||
|
# 4.9 - Missed some invalid characters in cleanup_name
|
||||||
|
# 5.0 - Extraction of info from Kindle for PC/Mac moved into kindlekey.py
|
||||||
|
# - tweaked GetDecryptedBook interface to leave passed parameters unchanged
|
||||||
|
# 5.1 - moved unicode_argv call inside main for Windows DeDRM compatibility
|
||||||
|
# 5.2 - Fixed error in command line processing of unicode arguments
|
||||||
|
|
||||||
|
__version__ = '5.2'
|
||||||
|
|
||||||
|
|
||||||
|
import sys, os, re
|
||||||
|
import csv
|
||||||
|
import getopt
|
||||||
import re
|
import re
|
||||||
from struct import pack, unpack, unpack_from
|
|
||||||
import traceback
|
import traceback
|
||||||
|
import time
|
||||||
|
import htmlentitydefs
|
||||||
|
import json
|
||||||
|
|
||||||
class DrmException(Exception):
|
class DrmException(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
global charMap1
|
if 'calibre' in sys.modules:
|
||||||
global charMap3
|
inCalibre = True
|
||||||
global charMap4
|
else:
|
||||||
|
inCalibre = False
|
||||||
|
|
||||||
|
if inCalibre:
|
||||||
|
from calibre_plugins.dedrm import mobidedrm
|
||||||
|
from calibre_plugins.dedrm import topazextract
|
||||||
|
from calibre_plugins.dedrm import kgenpids
|
||||||
|
from calibre_plugins.dedrm import android
|
||||||
|
else:
|
||||||
|
import mobidedrm
|
||||||
|
import topazextract
|
||||||
|
import kgenpids
|
||||||
|
import android
|
||||||
|
|
||||||
|
# Wrap a stream so that output gets flushed immediately
|
||||||
|
# and also make sure that any unicode strings get
|
||||||
|
# encoded using "replace" before writing them.
|
||||||
|
class SafeUnbuffered:
|
||||||
|
def __init__(self, stream):
|
||||||
|
self.stream = stream
|
||||||
|
self.encoding = stream.encoding
|
||||||
|
if self.encoding == None:
|
||||||
|
self.encoding = "utf-8"
|
||||||
|
def write(self, data):
|
||||||
|
if isinstance(data,unicode):
|
||||||
|
data = data.encode(self.encoding,"replace")
|
||||||
|
self.stream.write(data)
|
||||||
|
self.stream.flush()
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
return getattr(self.stream, attr)
|
||||||
|
|
||||||
|
iswindows = sys.platform.startswith('win')
|
||||||
|
isosx = sys.platform.startswith('darwin')
|
||||||
|
|
||||||
|
def unicode_argv():
|
||||||
|
if iswindows:
|
||||||
|
# Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
|
||||||
|
# strings.
|
||||||
|
|
||||||
|
# Versions 2.x of Python don't support Unicode in sys.argv on
|
||||||
|
# Windows, with the underlying Windows API instead replacing multi-byte
|
||||||
|
# characters with '?'.
|
||||||
|
|
||||||
|
|
||||||
charMap1 = 'n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M'
|
from ctypes import POINTER, byref, cdll, c_int, windll
|
||||||
charMap3 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
|
from ctypes.wintypes import LPCWSTR, LPWSTR
|
||||||
charMap4 = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
|
|
||||||
|
|
||||||
# crypto digestroutines
|
GetCommandLineW = cdll.kernel32.GetCommandLineW
|
||||||
import hashlib
|
GetCommandLineW.argtypes = []
|
||||||
|
GetCommandLineW.restype = LPCWSTR
|
||||||
|
|
||||||
def MD5(message):
|
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
|
||||||
ctx = hashlib.md5()
|
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
|
||||||
ctx.update(message)
|
CommandLineToArgvW.restype = POINTER(LPWSTR)
|
||||||
return ctx.digest()
|
|
||||||
|
|
||||||
def SHA1(message):
|
cmd = GetCommandLineW()
|
||||||
ctx = hashlib.sha1()
|
argc = c_int(0)
|
||||||
ctx.update(message)
|
argv = CommandLineToArgvW(cmd, byref(argc))
|
||||||
return ctx.digest()
|
if argc.value > 0:
|
||||||
|
# Remove Python executable and commands if present
|
||||||
|
start = argc.value - len(sys.argv)
|
||||||
|
return [argv[i] for i in
|
||||||
|
xrange(start, argc.value)]
|
||||||
|
# if we don't have any arguments at all, just pass back script name
|
||||||
|
# this should never happen
|
||||||
|
return [u"mobidedrm.py"]
|
||||||
|
else:
|
||||||
|
argvencoding = sys.stdin.encoding
|
||||||
|
if argvencoding == None:
|
||||||
|
argvencoding = "utf-8"
|
||||||
|
return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]
|
||||||
|
|
||||||
|
# cleanup unicode filenames
|
||||||
|
# borrowed from calibre (calibre/src/calibre/__init__.py)
|
||||||
|
# added in removal of control (<32) chars
|
||||||
|
# and removal of . at start and end
|
||||||
|
# and with some (heavily edited) code from Paul Durrant's kindlenamer.py
|
||||||
|
def cleanup_name(name):
|
||||||
|
# substitute filename unfriendly characters
|
||||||
|
name = name.replace(u"<",u"[").replace(u">",u"]").replace(u" : ",u" – ").replace(u": ",u" – ").replace(u":",u"—").replace(u"/",u"_").replace(u"\\",u"_").replace(u"|",u"_").replace(u"\"",u"\'").replace(u"*",u"_").replace(u"?",u"")
|
||||||
|
# delete control characters
|
||||||
|
name = u"".join(char for char in name if ord(char)>=32)
|
||||||
|
# white space to single space, delete leading and trailing white space
|
||||||
|
name = re.sub(ur"\s", u" ", name).strip()
|
||||||
|
# remove leading dots
|
||||||
|
while len(name)>0 and name[0] == u".":
|
||||||
|
name = name[1:]
|
||||||
|
# remove trailing dots (Windows doesn't like them)
|
||||||
|
if name.endswith(u'.'):
|
||||||
|
name = name[:-1]
|
||||||
|
return name
|
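A worked example of the substitutions cleanup_name performs (Python 2; the chain below repeats only the replacements this input exercises):

name = u'Kindle: "A/B" <v1>?'
name = (name.replace(u"<", u"[").replace(u">", u"]")
            .replace(u": ", u" – ").replace(u"/", u"_")
            .replace(u"\"", u"'").replace(u"?", u""))
assert name == u"Kindle – 'A_B' [v1]"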
||||||
|
|
||||||
# Encode the bytes in data with the characters in map
|
# must be passed unicode
|
||||||
def encode(data, map):
|
def unescape(text):
|
||||||
result = ''
|
def fixup(m):
|
||||||
for char in data:
|
text = m.group(0)
|
||||||
value = ord(char)
|
if text[:2] == u"&#":
|
||||||
Q = (value ^ 0x80) // len(map)
|
# character reference
|
||||||
R = value % len(map)
|
try:
|
||||||
result += map[Q]
|
if text[:3] == u"&#x":
|
||||||
result += map[R]
|
return unichr(int(text[3:-1], 16))
|
||||||
return result
|
else:
|
||||||
|
return unichr(int(text[2:-1]))
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# named entity
|
||||||
|
try:
|
||||||
|
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
return text # leave as is
|
||||||
|
return re.sub(u"&#?\w+;", fixup, text)
|
||||||
|
|
||||||
# Hash the bytes in data and then encode the digest with the characters in map
|
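
# Examples (made-up titles, not from any real book):
#   cleanup_name(u"Some<Book>: A/B Test?") returns u"Some[Book] – A_B Test"
#   unescape(u"Tom &amp; Jerry &#233;") returns u"Tom & Jerry é"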

def GetDecryptedBook(infile, kDatabases, serials, pids, starttime = time.time()):
    # handle the obvious cases at the beginning
    if not os.path.isfile(infile):
        raise DrmException(u"Input file does not exist.")

    mobi = True
    magic3 = open(infile,'rb').read(3)
    if magic3 == 'TPZ':
        mobi = False

    if mobi:
        mb = mobidedrm.MobiBook(infile)
    else:
        mb = topazextract.TopazBook(infile)

    bookname = unescape(mb.getBookTitle())
    print u"Decrypting {1} ebook: {0}".format(bookname, mb.getBookType())

    # copy list of pids
    totalpids = list(pids)
    # extend PID list with book-specific PIDs
    md1, md2 = mb.getPIDMetaInfo()
    totalpids.extend(kgenpids.getPidList(md1, md2, serials, kDatabases))
    print u"Found {1:d} keys to try after {0:.1f} seconds".format(time.time()-starttime, len(totalpids))

    try:
        mb.processBook(totalpids)
    except:
        mb.cleanup()
        raise

    print u"Decryption succeeded after {0:.1f} seconds".format(time.time()-starttime)
    return mb


# kDatabaseFiles is a list of files created by kindlekey
def decryptBook(infile, outdir, kDatabaseFiles, serials, pids):
    starttime = time.time()
    kDatabases = []
    for dbfile in kDatabaseFiles:
        kindleDatabase = {}
        try:
            with open(dbfile, 'r') as keyfilein:
                kindleDatabase = json.loads(keyfilein.read())
            kDatabases.append([dbfile,kindleDatabase])
        except Exception, e:
            print u"Error getting database from file {0:s}: {1:s}".format(dbfile,e)
            traceback.print_exc()

    try:
        book = GetDecryptedBook(infile, kDatabases, serials, pids, starttime)
    except Exception, e:
        print u"Error decrypting book after {1:.1f} seconds: {0}".format(e.args[0],time.time()-starttime)
        traceback.print_exc()
        return 1

    # if we're saving to the same folder as the original, use file name_
    # if to a different folder, use book name
    if os.path.normcase(os.path.normpath(outdir)) == os.path.normcase(os.path.normpath(os.path.dirname(infile))):
        outfilename = os.path.splitext(os.path.basename(infile))[0]
    else:
        outfilename = cleanup_name(book.getBookTitle())

    # avoid excessively long file names
    if len(outfilename)>150:
        outfilename = outfilename[:150]

    outfilename = outfilename+u"_nodrm"
    outfile = os.path.join(outdir, outfilename + book.getBookExtension())

    book.getFile(outfile)
    print u"Saved decrypted book {1:s} after {0:.1f} seconds".format(time.time()-starttime, outfilename)

    if book.getBookType()==u"Topaz":
        zipname = os.path.join(outdir, outfilename + u"_SVG.zip")
        book.getSVGZip(zipname)
        print u"Saved SVG ZIP Archive for {1:s} after {0:.1f} seconds".format(time.time()-starttime, outfilename)

    # remove internal temporary directory of Topaz pieces
    book.cleanup()
    return 0
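
# A sketch of driving decryptBook() from another script (paths and the key
# file name here are made up; all three key sources are optional):
#   decryptBook(u"/books/title.azw", u"/books/out", [u"kindle.k4i"], [], [])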


def usage(progname):
    print u"Removes DRM protection from Mobipocket, Amazon KF8, Amazon Print Replica and Amazon Topaz ebooks"
    print u"Usage:"
    print u"  {0} [-k <kindle.k4i>] [-p <comma separated PIDs>] [-s <comma separated Kindle serial numbers>] [ -a <AmazonSecureStorage.xml> ] <infile> <outdir>".format(progname)

#
# Main
#
def cli_main():
    argv=unicode_argv()
    progname = os.path.basename(argv[0])
    print u"K4MobiDeDrm v{0}.\nCopyright © 2008-2013 The Dark Reverser et al.".format(__version__)

    try:
        opts, args = getopt.getopt(argv[1:], "k:p:s:a:")
    except getopt.GetoptError, err:
        print u"Error in options or arguments: {0}".format(err.args[0])
        usage(progname)
        sys.exit(2)
    if len(args)<2:
        usage(progname)
        sys.exit(2)

    infile = args[0]
    outdir = args[1]
    kDatabaseFiles = []
    serials = []
    pids = []

    for o, a in opts:
        if o == "-k":
            if a == None :
                raise DrmException("Invalid parameter for -k")
            kDatabaseFiles.append(a)
        if o == "-p":
            if a == None :
                raise DrmException("Invalid parameter for -p")
            pids = a.split(',')
        if o == "-s":
            if a == None :
                raise DrmException("Invalid parameter for -s")
            serials = a.split(',')
        if o == '-a':
            if a == None:
                continue
            serials.extend(android.get_serials(a))
            serials.extend(android.get_serials())

    # try with built in Kindle Info files if not on Linux
    k4 = not sys.platform.startswith('linux')

    return decryptBook(infile, outdir, kDatabaseFiles, serials, pids)


if __name__ == '__main__':
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(cli_main())


charMap1 = 'n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M'
charMap3 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
charMap4 = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'

# crypto digest routines
import hashlib

def MD5(message):
    ctx = hashlib.md5()
    ctx.update(message)
    return ctx.digest()

def SHA1(message):
    ctx = hashlib.sha1()
    ctx.update(message)
    return ctx.digest()


# Encode the bytes in data with the characters in map
def encode(data, map):
    result = ''
    for char in data:
        value = ord(char)
        Q = (value ^ 0x80) // len(map)
        R = value % len(map)
        result += map[Q]
        result += map[R]
    return result

# Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map):
    return encode(MD5(data),map)

# Decode the string in data with the characters in map. Returns the decoded bytes
def decode(data,map):
    result = ''
    for i in range (0,len(data)-1,2):
        high = map.find(data[i])
        low = map.find(data[i+1])
        if (high == -1) or (low == -1) :
            break
        value = (((high * len(map)) ^ 0x80) & 0xFF) + low
        result += pack('B',value)
    return result

#
# PID generation routines
#

# Returns two bits at offset from a bit field
def getTwoBitsFromBitField(bitField,offset):
    byteNumber = offset // 4
    bitPosition = 6 - 2*(offset % 4)
    return ord(bitField[byteNumber]) >> bitPosition & 3

# Returns the six bits at offset from a bit field
def getSixBitsFromBitField(bitField,offset):
    offset *= 3
    value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
    return value

# 8 bits to six bits encoding from hash to generate PID string
def encodePID(hash):
    global charMap3
    PID = ''
    for position in range (0,8):
        PID += charMap3[getSixBitsFromBitField(hash,position)]
    return PID

# Encryption table used to generate the device PID
def generatePidEncryptionTable() :
    table = []
    for counter1 in range (0,0x100):
        value = counter1
        for counter2 in range (0,8):
            if (value & 1 == 0) :
                value = value >> 1
            else :
                value = value >> 1
                value = value ^ 0xEDB88320
        table.append(value)
    return table

# Seed value used to generate the device PID
def generatePidSeed(table,dsn) :
    value = 0
    for counter in range (0,4) :
        index = (ord(dsn[counter]) ^ value) &0xFF
        value = (value >> 8) ^ table[index]
    return value

# Generate the device PID
def generateDevicePID(table,dsn,nbRoll):
    global charMap4
    seed = generatePidSeed(table,dsn)
    pidAscii = ''
    pid = [(seed >>24) &0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF,(seed>>24) & 0xFF,(seed >> 16) &0xff,(seed >> 8) &0xFF,(seed) & 0xFF]
    index = 0
    for counter in range (0,nbRoll):
        pid[index] = pid[index] ^ ord(dsn[counter])
        index = (index+1) %8
    for counter in range (0,8):
        index = ((((pid[counter] >>5) & 3) ^ pid[counter]) & 0x1f) + (pid[counter] >> 7)
        pidAscii += charMap4[index]
    return pidAscii

def crc32(s):
    return (~binascii.crc32(s,-1))&0xFFFFFFFF

# convert from 8 digit PID to 10 digit PID with checksum
def checksumPid(s):
    global charMap4
    crc = crc32(s)
    crc = crc ^ (crc >> 16)
    res = s
    l = len(charMap4)
    for i in (0,1):
        b = crc & 0xff
        pos = (b // l) ^ (b % l)
        res += charMap4[pos%l]
        crc >>= 8
    return res


# old kindle serial number to fixed pid
def pidFromSerial(s, l):
    global charMap4
    crc = crc32(s)
    arr1 = [0]*l
    for i in xrange(len(s)):
        arr1[i%l] ^= ord(s[i])
    crc_bytes = [crc >> 24 & 0xff, crc >> 16 & 0xff, crc >> 8 & 0xff, crc & 0xff]
    for i in xrange(l):
        arr1[i] ^= crc_bytes[i&3]
    pid = ""
    for i in xrange(l):
        b = arr1[i] & 0xff
        pid+=charMap4[(b >> 7) + ((b >> 5 & 3) ^ (b & 0x1f))]
    return pid


# Parse the EXTH header records and use the Kindle serial number to calculate the book pid.
def getKindlePids(rec209, token, serialnum):
    pids=[]

    if isinstance(serialnum,unicode):
        serialnum = serialnum.encode('ascii')

    # Compute book PID
    pidHash = SHA1(serialnum+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pids.append(bookPID)

    # compute fixed pid for old pre 2.5 firmware update pid as well
    kindlePID = pidFromSerial(serialnum, 7) + "*"
    kindlePID = checksumPid(kindlePID)
    pids.append(kindlePID)

    return pids


# parse the Kindleinfo file to calculate the book pid.

keynames = ['kindle.account.tokens','kindle.cookie.item','eulaVersionAccepted','login_date','kindle.token.item','login','kindle.key.item','kindle.name.info','kindle.device.info', 'MazamaRandomNumber']

def getK4Pids(rec209, token, kindleDatabase):
    global charMap1
    pids = []

    try:
        # Get the Mazama Random number
        MazamaRandomNumber = (kindleDatabase[1])['MazamaRandomNumber'].decode('hex').encode('ascii')

        # Get the kindle account token
        kindleAccountToken = (kindleDatabase[1])['kindle.account.tokens'].decode('hex').encode('ascii')

        # Get the IDString used to decode the Kindle Info file
        IDString = (kindleDatabase[1])['IDString'].decode('hex').encode('ascii')

        # Get the UserName stored when the Kindle Info file was decoded
        UserName = (kindleDatabase[1])['UserName'].decode('hex').encode('ascii')

    except KeyError:
        print u"Keys not found in the database {0}.".format(kindleDatabase[0])
        return pids

    # Get the ID string used
    encodedIDString = encodeHash(IDString,charMap1)

    # Get the current user name
    encodedUsername = encodeHash(UserName,charMap1)

    # concat, hash and encode to calculate the DSN
    DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)

    # Compute the device PID (which, for all I can tell, is used for nothing).
    table = generatePidEncryptionTable()
    devicePID = generateDevicePID(table,DSN,4)
    devicePID = checksumPid(devicePID)
    pids.append(devicePID)

    # Compute book PIDs

    # book pid
    pidHash = SHA1(DSN+kindleAccountToken+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pids.append(bookPID)

    # variant 1
    pidHash = SHA1(kindleAccountToken+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pids.append(bookPID)

    # variant 2
    pidHash = SHA1(DSN+rec209+token)
    bookPID = encodePID(pidHash)
    bookPID = checksumPid(bookPID)
    pids.append(bookPID)

    return pids


def getPidList(md1, md2, serials=[], kDatabases=[]):
    pidlst = []

    if kDatabases is None:
        kDatabases = []
    if serials is None:
        serials = []

    for kDatabase in kDatabases:
        try:
            pidlst.extend(getK4Pids(md1, md2, kDatabase))
        except Exception, e:
            print u"Error getting PIDs from database {0}: {1}".format(kDatabase[0],e.args[0])
            traceback.print_exc()

    for serialnum in serials:
        try:
            pidlst.extend(getKindlePids(md1, md2, serialnum))
        except Exception, e:
            print u"Error getting PIDs from serial number {0}: {1}".format(serialnum ,e.args[0])
            traceback.print_exc()

    return pidlst
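
# A minimal sketch of how the routines above combine, using a made-up
# serial number (real Kindle serials are 16 characters starting with 'B'):
if __name__ == '__main__':
    # prints two 10-character PIDs, each ending in a two-character checksum
    for pid in getKindlePids('', '', 'B001000000000001'):
        print pid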
@ -1,89 +1,541 @@

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# mobidedrm.py, version 0.38
# Copyright © 2008 The Dark Reverser
#
# Modified 2008–2012 by some_updates, DiapDealer and Apprentice Alf

# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# Changelog
#  0.01 - Initial version
#  0.02 - Huffdic compressed books were not properly decrypted
#  0.03 - Wasn't checking MOBI header length
#  0.04 - Wasn't sanity checking size of data record
#  0.05 - It seems that the extra data flags take two bytes not four
#  0.06 - And that low bit does mean something after all :-)
#  0.07 - The extra data flags aren't present in MOBI header < 0xE8 in size
#  0.08 - ...and also not in Mobi header version < 6
#  0.09 - ...but they are there with Mobi header version 6, header size 0xE4!
#  0.10 - Outputs unencrypted files as-is, so that when run as a Calibre
#         import filter it works when importing unencrypted files.
#         Also now handles encrypted files that don't need a specific PID.
#  0.11 - use autoflushed stdout and proper return values
#  0.12 - Fix for problems with metadata import as Calibre plugin, report errors
#  0.13 - Formatting fixes: retabbed file, removed trailing whitespace
#         and extra blank lines, converted CR/LF pairs at ends of each line,
#         and other cosmetic fixes.
#  0.14 - Working out when the extra data flags are present has been problematic
#         Versions 7 through 9 have tried to tweak the conditions, but have been
#         only partially successful. Closer examination of lots of sample
#         files reveals that a confusion has arisen because trailing data entries
#         are not encrypted, but it turns out that the multibyte entries
#         in utf8 file are encrypted. (Although neither kind gets compressed.)
#         This knowledge leads to a simplification of the test for the
#         trailing data byte flags - version 5 and higher AND header size >= 0xE4.
#  0.15 - Now outputs 'heartbeat', and is also quicker for long files.
#  0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
#  0.17 - added modifications to support its use as an imported python module
#         both inside calibre and also in other places (ie K4DeDRM tools)
#  0.17a- disabled the standalone plugin feature since a plugin can not import
#         a plugin
#  0.18 - It seems that multibyte entries aren't encrypted in a v7 file...
#         Removed the disabled Calibre plug-in code
#         Permit use of 8-digit PIDs
#  0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
#  0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
#  0.21 - Added support for multiple pids
#  0.22 - revised structure to hold MobiBook as a class to allow an extended interface
#  0.23 - fixed problem with older files with no EXTH section
#  0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
#  0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
#  0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
#  0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
#  0.28 - slight additional changes to metadata token generation (None -> '')
#  0.29 - It seems that the ideas about when multibyte trailing characters were
#         included in the encryption were wrong. They are for DOC compressed
#         files, but they are not for HUFF/CDIC compress files!
#  0.30 - Modified interface slightly to work better with new calibre plugin style
#  0.31 - The multibyte encryption info is true for version 7 files too.
#  0.32 - Added support for "Print Replica" Kindle ebooks
#  0.33 - Performance improvements for large files (concatenation)
#  0.34 - Performance improvements in decryption (libalfcrypto)
#  0.35 - add interface to get mobi_version
#  0.36 - fixed problem with TEXtREAd and getBookTitle interface
#  0.37 - Fixed double announcement for stand-alone operation
#  0.38 - Unicode used wherever possible, cope with absent alfcrypto
#  0.39 - Fixed problem with TEXtREAd and getBookType interface
#  0.40 - moved unicode_argv call inside main for Windows DeDRM compatibility
#  0.41 - Fixed potential unicode problem in command line calls

__version__ = u"0.41"

import sys
import os
import struct
import binascii
try:
    from alfcrypto import Pukall_Cipher
except:
    print u"AlfCrypto not found. Using python PC1 implementation."

# Wrap a stream so that output gets flushed immediately
# and also make sure that any unicode strings get
# encoded using "replace" before writing them.
class SafeUnbuffered:
    def __init__(self, stream):
        self.stream = stream
        self.encoding = stream.encoding
        if self.encoding == None:
            self.encoding = "utf-8"
    def write(self, data):
        if isinstance(data,unicode):
            data = data.encode(self.encoding,"replace")
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)

iswindows = sys.platform.startswith('win')
isosx = sys.platform.startswith('darwin')

def unicode_argv():
    if iswindows:
        # Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
        # strings.

        # Versions 2.x of Python don't support Unicode in sys.argv on
        # Windows, with the underlying Windows API instead replacing multi-byte
        # characters with '?'.

        from ctypes import POINTER, byref, cdll, c_int, windll
        from ctypes.wintypes import LPCWSTR, LPWSTR

        GetCommandLineW = cdll.kernel32.GetCommandLineW
        GetCommandLineW.argtypes = []
        GetCommandLineW.restype = LPCWSTR

        CommandLineToArgvW = windll.shell32.CommandLineToArgvW
        CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
        CommandLineToArgvW.restype = POINTER(LPWSTR)

        cmd = GetCommandLineW()
        argc = c_int(0)
        argv = CommandLineToArgvW(cmd, byref(argc))
        if argc.value > 0:
            # Remove Python executable and commands if present
            start = argc.value - len(sys.argv)
            return [argv[i] for i in
                    xrange(start, argc.value)]
        # if we don't have any arguments at all, just pass back script name
        # this should never happen
        return [u"mobidedrm.py"]
    else:
        argvencoding = sys.stdin.encoding
        if argvencoding == None:
            argvencoding = 'utf-8'
        return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]

class DrmException(Exception):
    pass

#
# MobiBook Utility Routines
#

# Implementation of Pukall Cipher 1
def PC1(key, src, decryption=True):
    # if we can get it from alfcrypto, use that
    try:
        return Pukall_Cipher().PC1(key,src,decryption)
    except NameError:
        pass
    except TypeError:
        pass

    # use slow python version, since Pukall_Cipher didn't load
    sum1 = 0;
    sum2 = 0;
    keyXorVal = 0;
    if len(key)!=16:
        raise DrmException (u"PC1: Bad key length")
    wkey = []
    for i in xrange(8):
        wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
    dst = ""
    for i in xrange(len(src)):
        temp1 = 0;
        byteXorVal = 0;
        for j in xrange(8):
            temp1 ^= wkey[j]
            sum2 = (sum2+j)*20021 + sum1
            sum1 = (temp1*346)&0xFFFF
            sum2 = (sum2+sum1)&0xFFFF
            temp1 = (temp1*20021+1)&0xFFFF
            byteXorVal ^= temp1 ^ sum2
        curByte = ord(src[i])
        if not decryption:
            keyXorVal = curByte * 257;
        curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
        if decryption:
            keyXorVal = curByte * 257;
        for j in xrange(8):
            wkey[j] ^= keyXorVal;
        dst+=chr(curByte)
    return dst
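
# PC1 is symmetric: encrypting with decryption=False and then decrypting
# restores the plaintext. Sketch with a made-up 16-byte key:
#   key = '0123456789abcdef'
#   ciphertext = PC1(key, 'some text', decryption=False)
#   assert PC1(key, ciphertext) == 'some text'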

def checksumPid(s):
    letters = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
    crc = (~binascii.crc32(s,-1))&0xFFFFFFFF
    crc = crc ^ (crc >> 16)
    res = s
    l = len(letters)
    for i in (0,1):
        b = crc & 0xff
        pos = (b // l) ^ (b % l)
        res += letters[pos%l]
        crc >>= 8
    return res
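
# Worked example: checksumPid() turns an 8-digit PID into the 10-digit form
# that Kindle devices report (the two extra characters are the checksum):
#   pid = checksumPid('A1B2C3D4')
#   assert len(pid) == 10 and pid[:8] == 'A1B2C3D4'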

def getSizeOfTrailingDataEntries(ptr, size, flags):
    def getSizeOfTrailingDataEntry(ptr, size):
        bitpos, result = 0, 0
        if size <= 0:
            return result
        while True:
            v = ord(ptr[size-1])
            result |= (v & 0x7F) << bitpos
            bitpos += 7
            size -= 1
            if (v & 0x80) != 0 or (bitpos >= 28) or (size == 0):
                return result
    num = 0
    testflags = flags >> 1
    while testflags:
        if testflags & 1:
            num += getSizeOfTrailingDataEntry(ptr, size - num)
        testflags >>= 1
    # Check the low bit to see if there's multibyte data present.
    # if multibyte data is included in the encrypted data, we'll
    # have already cleared this flag.
    if flags & 1:
        num += (ord(ptr[size - num - 1]) & 0x3) + 1
    return num
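
# Each trailing entry ends with a backward varint holding the entry's total
# size. Sketch with a made-up record: a 5-byte entry ('abcd' plus the size
# byte 0x85, i.e. 0x80 | 5), selected by flag bit 1:
#   record = 'TEXT' + 'abcd' + chr(0x85)
#   assert getSizeOfTrailingDataEntries(record, len(record), 2) == 5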


class MobiBook:
    def loadSection(self, section):
        if (section + 1 == self.num_sections):
            endoff = len(self.data_file)
        else:
            endoff = self.sections[section + 1][0]
        off = self.sections[section][0]
        return self.data_file[off:endoff]

    def cleanup(self):
        # to match function in Topaz book
        pass

    def __init__(self, infile):
        print u"MobiDeDrm v{0:s}.\nCopyright © 2008-2012 The Dark Reverser et al.".format(__version__)

        try:
            from alfcrypto import Pukall_Cipher
        except:
            print u"AlfCrypto not found. Using python PC1 implementation."

        # initial sanity check on file
        self.data_file = file(infile, 'rb').read()
        self.mobi_data = ''
        self.header = self.data_file[0:78]
        if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
            raise DrmException(u"Invalid file format")
        self.magic = self.header[0x3C:0x3C+8]
        self.crypto_type = -1

        # build up section offset and flag info
        self.num_sections, = struct.unpack('>H', self.header[76:78])
        self.sections = []
        for i in xrange(self.num_sections):
            offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data_file[78+i*8:78+i*8+8])
            flags, val = a1, a2<<16|a3<<8|a4
            self.sections.append( (offset, flags, val) )

        # parse information from section 0
        self.sect = self.loadSection(0)
        self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
        self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])

        # set default values before PalmDoc test
        self.print_replica = False
        self.extra_data_flags = 0
        self.meta_array = {}
        self.mobi_length = 0
        self.mobi_codepage = 1252
        self.mobi_version = -1

        if self.magic == 'TEXtREAd':
            print u"PalmDoc format book detected."
            return

        self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
        self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
        self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
        print u"MOBI header version {0:d}, header length {1:d}".format(self.mobi_version, self.mobi_length)
        if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
            self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
            print u"Extra Data Flags: {0:d}".format(self.extra_data_flags)
        if (self.compression != 17480):
            # multibyte utf8 data is included in the encryption for PalmDoc compression
            # so clear that byte so that we leave it to be decrypted.
            self.extra_data_flags &= 0xFFFE

        # if exth region exists parse it for metadata array
        try:
            exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
            exth = ''
            if exth_flag & 0x40:
                exth = self.sect[16 + self.mobi_length:]
            if (len(exth) >= 12) and (exth[:4] == 'EXTH'):
                nitems, = struct.unpack('>I', exth[8:12])
                pos = 12
                for i in xrange(nitems):
                    type, size = struct.unpack('>II', exth[pos: pos + 8])
                    content = exth[pos + 8: pos + size]
                    self.meta_array[type] = content
                    # reset the text to speech flag and clipping limit, if present
                    if type == 401 and size == 9:
                        # set clipping limit to 100%
                        self.patchSection(0, '\144', 16 + self.mobi_length + pos + 8)
                    elif type == 404 and size == 9:
                        # make sure text to speech is enabled
                        self.patchSection(0, '\0', 16 + self.mobi_length + pos + 8)
                    # print type, size, content, content.encode('hex')
                    pos += size
        except:
            pass

    def getBookTitle(self):
        codec_map = {
            1252 : 'windows-1252',
            65001 : 'utf-8',
        }
        title = ''
        codec = 'windows-1252'
        if self.magic == 'BOOKMOBI':
            if 503 in self.meta_array:
                title = self.meta_array[503]
            else:
                toff, tlen = struct.unpack('>II', self.sect[0x54:0x5c])
                tend = toff + tlen
                title = self.sect[toff:tend]
        if self.mobi_codepage in codec_map.keys():
            codec = codec_map[self.mobi_codepage]
        if title == '':
            title = self.header[:32]
            title = title.split('\0')[0]
        return unicode(title, codec)

    def getPIDMetaInfo(self):
        rec209 = ''
        token = ''
        if 209 in self.meta_array:
            rec209 = self.meta_array[209]
            data = rec209
            # The 209 data comes in five byte groups. Interpret the last four bytes
            # of each group as a big endian unsigned integer to get a key value
            # if that key exists in the meta_array, append its contents to the token
            for i in xrange(0,len(data),5):
                val, = struct.unpack('>I',data[i+1:i+5])
                sval = self.meta_array.get(val,'')
                token += sval
        return rec209, token
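
    # Sketch of the record 209 layout with made-up bytes: each five-byte
    # group is one pad byte followed by a big-endian EXTH record number,
    # so rec209 = '\x00' + struct.pack('>I', 100) would make the loop above
    # append self.meta_array.get(100, '') to the token.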

    def patch(self, off, new):
        self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]

    def patchSection(self, section, new, in_off = 0):
        if (section + 1 == self.num_sections):
            endoff = len(self.data_file)
        else:
            endoff = self.sections[section + 1][0]
        off = self.sections[section][0]
        assert off + in_off + len(new) <= endoff
        self.patch(off + in_off, new)

    def parseDRM(self, data, count, pidlist):
        found_key = None
        keyvec1 = '\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96'
        for pid in pidlist:
            bigpid = pid.ljust(16,'\0')
            temp_key = PC1(keyvec1, bigpid, False)
            temp_key_sum = sum(map(ord,temp_key)) & 0xff
            found_key = None
            for i in xrange(count):
                verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
                if cksum == temp_key_sum:
                    cookie = PC1(temp_key, cookie)
                    ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
                    if verification == ver and (flags & 0x1F) == 1:
                        found_key = finalkey
                        break
            if found_key != None:
                break
        if not found_key:
            # Then try the default encoding that doesn't require a PID
            pid = '00000000'
            temp_key = keyvec1
            temp_key_sum = sum(map(ord,temp_key)) & 0xff
            for i in xrange(count):
                verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
                if cksum == temp_key_sum:
                    cookie = PC1(temp_key, cookie)
                    ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
                    if verification == ver:
                        found_key = finalkey
                        break
        return [found_key,pid]

    def getFile(self, outpath):
        file(outpath,'wb').write(self.mobi_data)

    def getBookType(self):
        if self.print_replica:
            return u"Print Replica"
        if self.mobi_version >= 8:
            return u"Kindle Format 8"
        if self.mobi_version >= 0:
            return u"Mobipocket {0:d}".format(self.mobi_version)
        return u"PalmDoc"

    def getBookExtension(self):
        if self.print_replica:
            return u".azw4"
        if self.mobi_version >= 8:
            return u".azw3"
        return u".mobi"

    def processBook(self, pidlist):
        crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
        print u"Crypto Type is: {0:d}".format(crypto_type)
        self.crypto_type = crypto_type
        if crypto_type == 0:
            print u"This book is not encrypted."
            # we must still check for Print Replica
            self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
            self.mobi_data = self.data_file
            return
        if crypto_type != 2 and crypto_type != 1:
            raise DrmException(u"Cannot decode unknown Mobipocket encryption type {0:d}".format(crypto_type))
        if 406 in self.meta_array:
            data406 = self.meta_array[406]
            val406, = struct.unpack('>Q',data406)
            if val406 != 0:
                raise DrmException(u"Cannot decode library or rented ebooks.")

        goodpids = []
        for pid in pidlist:
            if len(pid)==10:
                if checksumPid(pid[0:-2]) != pid:
                    print u"Warning: PID {0} has incorrect checksum, should have been {1}".format(pid,checksumPid(pid[0:-2]))
                goodpids.append(pid[0:-2])
            elif len(pid)==8:
                goodpids.append(pid)

        if self.crypto_type == 1:
            t1_keyvec = 'QDCVEPMU675RUBSZ'
            if self.magic == 'TEXtREAd':
                bookkey_data = self.sect[0x0E:0x0E+16]
            elif self.mobi_version < 0:
                bookkey_data = self.sect[0x90:0x90+16]
            else:
                bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
            pid = '00000000'
            found_key = PC1(t1_keyvec, bookkey_data)
        else :
            # calculate the keys
            drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', self.sect[0xA8:0xA8+16])
            if drm_count == 0:
                raise DrmException(u"Encryption not initialised. Must be opened with Mobipocket Reader first.")
            found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
            if not found_key:
                raise DrmException(u"No key found in {0:d} keys tried.".format(len(goodpids)))
            # kill the drm keys
            self.patchSection(0, '\0' * drm_size, drm_ptr)
            # kill the drm pointers
            self.patchSection(0, '\xff' * 4 + '\0' * 12, 0xA8)

        if pid=='00000000':
            print u"File has default encryption, no specific key needed."
        else:
            print u"File is encoded with PID {0}.".format(checksumPid(pid))

        # clear the crypto type
        self.patchSection(0, "\0" * 2, 0xC)

        # decrypt sections
        print u"Decrypting. Please wait . . .",
        mobidataList = []
        mobidataList.append(self.data_file[:self.sections[1][0]])
        for i in xrange(1, self.records+1):
            data = self.loadSection(i)
            extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
            if i%100 == 0:
                print u".",
            # print "record %d, extra_size %d" %(i,extra_size)
            decoded_data = PC1(found_key, data[0:len(data) - extra_size])
            if i==1:
                self.print_replica = (decoded_data[0:4] == '%MOP')
            mobidataList.append(decoded_data)
            if extra_size > 0:
                mobidataList.append(data[-extra_size:])
        if self.num_sections > self.records+1:
            mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
        self.mobi_data = "".join(mobidataList)
        print u"done"
        return

def getUnencryptedBook(infile,pidlist):
    if not os.path.isfile(infile):
        raise DrmException(u"Input File Not Found.")
    book = MobiBook(infile)
    book.processBook(pidlist)
    return book.mobi_data


def cli_main():
    argv=unicode_argv()
    progname = os.path.basename(argv[0])
    if len(argv)<3 or len(argv)>4:
        print u"MobiDeDrm v{0}.\nCopyright © 2008-2012 The Dark Reverser et al.".format(__version__)
        print u"Removes protection from Kindle/Mobipocket, Kindle/KF8 and Kindle/Print Replica ebooks"
        print u"Usage:"
        print u"  {0} <infile> <outfile> [<Comma separated list of PIDs to try>]".format(progname)
        return 1
    else:
        infile = argv[1]
        outfile = argv[2]
        if len(argv) == 4:
            pidlist = argv[3].split(',')
        else:
            pidlist = []
        try:
            stripped_file = getUnencryptedBook(infile, pidlist)
            file(outfile, 'wb').write(stripped_file)
        except DrmException, e:
            print u"MobiDeDRM v{0} Error: {1:s}".format(__version__,e.args[0])
            return 1
        return 0


if __name__ == '__main__':
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(cli_main())
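
# When imported as a module (as the DeDRM plugin does), the entry point is
# getUnencryptedBook(). Sketch with made-up file names and PID:
#   import mobidedrm
#   data = mobidedrm.getUnencryptedBook(u"book.azw", ['A1B2C3D4E5'])
#   open(u"book_nodrm.mobi", 'wb').write(data)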
@ -0,0 +1,89 @@

#!/usr/bin/env python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

# implement just enough of des from openssl to make erdr2pml.py happy

def load_libcrypto():
    from ctypes import CDLL, POINTER, c_void_p, c_char_p, c_char, c_int, c_long, \
        Structure, c_ulong, create_string_buffer, cast
    from ctypes.util import find_library
    import sys

    if sys.platform.startswith('win'):
        libcrypto = find_library('libeay32')
    else:
        libcrypto = find_library('crypto')

    if libcrypto is None:
        return None

    libcrypto = CDLL(libcrypto)

    # typedef struct DES_ks
    #    {
    #    union
    #        {
    #        DES_cblock cblock;
    #        /* make sure things are correct size on machines with
    #         * 8 byte longs */
    #        DES_LONG deslong[2];
    #        } ks[16];
    #    } DES_key_schedule;

    # just create a big enough place to hold everything
    # it will have alignment of structure so we should be okay (16 byte aligned?)
    class DES_KEY_SCHEDULE(Structure):
        _fields_ = [('DES_cblock1', c_char * 16),
                    ('DES_cblock2', c_char * 16),
                    ('DES_cblock3', c_char * 16),
                    ('DES_cblock4', c_char * 16),
                    ('DES_cblock5', c_char * 16),
                    ('DES_cblock6', c_char * 16),
                    ('DES_cblock7', c_char * 16),
                    ('DES_cblock8', c_char * 16),
                    ('DES_cblock9', c_char * 16),
                    ('DES_cblock10', c_char * 16),
                    ('DES_cblock11', c_char * 16),
                    ('DES_cblock12', c_char * 16),
                    ('DES_cblock13', c_char * 16),
                    ('DES_cblock14', c_char * 16),
                    ('DES_cblock15', c_char * 16),
                    ('DES_cblock16', c_char * 16)]

    DES_KEY_SCHEDULE_p = POINTER(DES_KEY_SCHEDULE)

    def F(restype, name, argtypes):
        func = getattr(libcrypto, name)
        func.restype = restype
        func.argtypes = argtypes
        return func

    DES_set_key = F(None, 'DES_set_key',[c_char_p, DES_KEY_SCHEDULE_p])
    DES_ecb_encrypt = F(None, 'DES_ecb_encrypt',[c_char_p, c_char_p, DES_KEY_SCHEDULE_p, c_int])


    class DES(object):
        def __init__(self, key):
            if len(key) != 8 :
                raise Exception('DES improper key used')
                return
            self.key = key
            self.keyschedule = DES_KEY_SCHEDULE()
            DES_set_key(self.key, self.keyschedule)
        def desdecrypt(self, data):
            ob = create_string_buffer(len(data))
            DES_ecb_encrypt(data, ob, self.keyschedule, 0)
            return ob.raw
        def decrypt(self, data):
            if not data:
                return ''
            i = 0
            result = []
            while i < len(data):
                block = data[i:i+8]
                processed_block = self.desdecrypt(block)
                result.append(processed_block)
                i += 8
            return ''.join(result)

    return DES
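
# load_libcrypto() returns None when OpenSSL's libcrypto is not available,
# letting callers fall back to a pure-python DES. Sketch (made-up key and
# 8-byte-aligned ciphertext):
#   DES = load_libcrypto()
#   if DES is not None:
#       plaintext = DES('8bytekey').decrypt(ciphertext)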
@ -1,292 +0,0 @@

#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

from __future__ import with_statement
__license__ = 'GPL v3'

# Standard Python modules.
import os, sys, re, hashlib
import json
import traceback

from calibre.utils.config import dynamic, config_dir, JSONConfig
from calibre_plugins.dedrm.__init__ import PLUGIN_NAME, PLUGIN_VERSION
from calibre.constants import iswindows, isosx

class DeDRM_Prefs():
    def __init__(self):
        JSON_PATH = os.path.join(u"plugins", PLUGIN_NAME.strip().lower().replace(' ', '_') + '.json')
        self.dedrmprefs = JSONConfig(JSON_PATH)

        self.dedrmprefs.defaults['configured'] = False
        self.dedrmprefs.defaults['bandnkeys'] = {}
        self.dedrmprefs.defaults['adeptkeys'] = {}
        self.dedrmprefs.defaults['ereaderkeys'] = {}
        self.dedrmprefs.defaults['kindlekeys'] = {}
        self.dedrmprefs.defaults['pids'] = []
        self.dedrmprefs.defaults['serials'] = []
        self.dedrmprefs.defaults['adobewineprefix'] = ""
        self.dedrmprefs.defaults['kindlewineprefix'] = ""

        # initialise
        # we must actually set the prefs that are dictionaries and lists
        # to empty dictionaries and lists, otherwise we are unable to add to them
        # as then it just adds to the (memory only) dedrmprefs.defaults versions!
        if self.dedrmprefs['bandnkeys'] == {}:
            self.dedrmprefs['bandnkeys'] = {}
        if self.dedrmprefs['adeptkeys'] == {}:
            self.dedrmprefs['adeptkeys'] = {}
        if self.dedrmprefs['ereaderkeys'] == {}:
            self.dedrmprefs['ereaderkeys'] = {}
        if self.dedrmprefs['kindlekeys'] == {}:
            self.dedrmprefs['kindlekeys'] = {}
        if self.dedrmprefs['pids'] == []:
            self.dedrmprefs['pids'] = []
        if self.dedrmprefs['serials'] == []:
            self.dedrmprefs['serials'] = []

    def __getitem__(self,kind = None):
        if kind is not None:
            return self.dedrmprefs[kind]
        return self.dedrmprefs

    def set(self, kind, value):
        self.dedrmprefs[kind] = value

    def writeprefs(self,value = True):
        self.dedrmprefs['configured'] = value

    def addnamedvaluetoprefs(self, prefkind, keyname, keyvalue):
        try:
            if keyvalue not in self.dedrmprefs[prefkind].values():
                # ensure that the keyname is unique
                # by adding a number (starting with 2) to the name if it is not
                namecount = 1
                newname = keyname
                while newname in self.dedrmprefs[prefkind]:
                    namecount += 1
                    newname = "{0:s}_{1:d}".format(keyname,namecount)
                # add to the preferences
                self.dedrmprefs[prefkind][newname] = keyvalue
                return (True, newname)
        except:
            traceback.print_exc()
            pass
        return (False, keyname)
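
    # Sketch of the renaming behaviour with hypothetical key material:
    #   prefs.addnamedvaluetoprefs('kindlekeys', 'mykey', key1)  # (True, 'mykey')
    #   prefs.addnamedvaluetoprefs('kindlekeys', 'mykey', key2)  # (True, 'mykey_2')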

    def addvaluetoprefs(self, prefkind, prefsvalue):
        # ensure the keyvalue isn't already in the preferences
        try:
            if prefsvalue not in self.dedrmprefs[prefkind]:
                self.dedrmprefs[prefkind].append(prefsvalue)
            return True
        except:
            traceback.print_exc()
        return False


def convertprefs(always = False):

    def parseIgnobleString(keystuff):
        from calibre_plugins.dedrm.ignoblekeygen import generate_key
        userkeys = []
        ar = keystuff.split(':')
        for keystring in ar:
            try:
                name, ccn = keystring.split(',')
                # Generate Barnes & Noble EPUB user key from name and credit card number.
                keyname = u"{0}_{1}".format(name.strip(),ccn.strip()[-4:])
                keyvalue = generate_key(name, ccn)
                userkeys.append([keyname,keyvalue])
            except Exception, e:
                traceback.print_exc()
                print e.args[0]
                pass
        return userkeys

    def parseeReaderString(keystuff):
        from calibre_plugins.dedrm.erdr2pml import getuser_key
        userkeys = []
        ar = keystuff.split(':')
        for keystring in ar:
            try:
                name, cc = keystring.split(',')
                # Generate eReader user key from name and credit card number.
                keyname = u"{0}_{1}".format(name.strip(),cc.strip()[-4:])
                keyvalue = getuser_key(name,cc).encode('hex')
                userkeys.append([keyname,keyvalue])
            except Exception, e:
                traceback.print_exc()
                print e.args[0]
                pass
        return userkeys

    def parseKindleString(keystuff):
        pids = []
        serials = []
        ar = keystuff.split(',')
        for keystring in ar:
            keystring = str(keystring).strip().replace(" ","")
            if (len(keystring) == 10 or len(keystring) == 8) and keystring not in pids:
                pids.append(keystring)
            elif len(keystring) == 16 and keystring[0] == 'B' and keystring not in serials:
                serials.append(keystring)
        return (pids,serials)
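
    # Sketch with made-up identifiers: a 10-character PID and a 16-character
    # serial beginning with 'B' sort into the two lists:
    #   parseKindleString('A1B2C3D4E5,B001000000000001')
    #   returns (['A1B2C3D4E5'], ['B001000000000001'])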

    def getConfigFiles(extension, encoding = None):
        # get any files with extension 'extension' in the config dir
        userkeys = []
        files = [f for f in os.listdir(config_dir) if f.endswith(extension)]
        for filename in files:
            try:
                fpath = os.path.join(config_dir, filename)
                key = os.path.splitext(filename)[0]
                value = open(fpath, 'rb').read()
                if encoding is not None:
                    value = value.encode(encoding)
                userkeys.append([key,value])
            except:
                traceback.print_exc()
                pass
        return userkeys

    dedrmprefs = DeDRM_Prefs()

    if (not always) and dedrmprefs['configured']:
        # We've already converted old preferences,
        # and we're not being forced to do it again, so just return
        return

    print u"{0} v{1}: Importing configuration data from old DeDRM plugins".format(PLUGIN_NAME, PLUGIN_VERSION)

    IGNOBLEPLUGINNAME = "Ignoble Epub DeDRM"
    EREADERPLUGINNAME = "eReader PDB 2 PML"
    OLDKINDLEPLUGINNAME = "K4PC, K4Mac, Kindle Mobi and Topaz DeDRM"

    # get prefs from older tools
    kindleprefs = JSONConfig(os.path.join(u"plugins", u"K4MobiDeDRM"))
    ignobleprefs = JSONConfig(os.path.join(u"plugins", u"ignoble_epub_dedrm"))

    # Handle the old ignoble plugin's customization string by converting the
    # old string to stored keys... get that personal data out of plain sight.
    from calibre.customize.ui import config
    sc = config['plugin_customization']
    val = sc.pop(IGNOBLEPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old Ignoble plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorkeycount = len(dedrmprefs['bandnkeys'])
        userkeys = parseIgnobleString(str(val))
        for keypair in userkeys:
            name = keypair[0]
            value = keypair[1]
            dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
        addedkeycount = len(dedrmprefs['bandnkeys'])-priorkeycount
        print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from old Ignoble plugin configuration string".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # Handle the old eReader plugin's customization string by converting the
    # old string to stored keys... get that personal data out of plain sight.
    val = sc.pop(EREADERPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old eReader plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorkeycount = len(dedrmprefs['ereaderkeys'])
        userkeys = parseeReaderString(str(val))
        for keypair in userkeys:
            name = keypair[0]
            value = keypair[1]
            dedrmprefs.addnamedvaluetoprefs('ereaderkeys', name, value)
        addedkeycount = len(dedrmprefs['ereaderkeys'])-priorkeycount
        print u"{0} v{1}: {2:d} eReader {3} imported from old eReader plugin configuration string".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get old Kindle plugin configuration string
    val = sc.pop(OLDKINDLEPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old Kindle plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorpidcount = len(dedrmprefs['pids'])
        priorserialcount = len(dedrmprefs['serials'])
        pids, serials = parseKindleString(val)
        for pid in pids:
            dedrmprefs.addvaluetoprefs('pids',pid)
        for serial in serials:
            dedrmprefs.addvaluetoprefs('serials',serial)
        addedpidcount = len(dedrmprefs['pids']) - priorpidcount
        addedserialcount = len(dedrmprefs['serials']) - priorserialcount
        print u"{0} v{1}: {2:d} {3} and {4:d} {5} imported from old Kindle plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION, addedpidcount, u"PID" if addedpidcount==1 else u"PIDs", addedserialcount, u"serial number" if addedserialcount==1 else u"serial numbers")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # copy the customisations back into calibre preferences, as we've now removed the nasty plaintext
    config['plugin_customization'] = sc

    # get any .b64 files in the config dir
    priorkeycount = len(dedrmprefs['bandnkeys'])
    bandnfilekeys = getConfigFiles('.b64')
    for keypair in bandnfilekeys:
        name = keypair[0]
        value = keypair[1]
        dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
    addedkeycount = len(dedrmprefs['bandnkeys'])-priorkeycount
    if addedkeycount > 0:
        print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from config folder.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key file" if addedkeycount==1 else u"key files")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get any .der files in the config dir
    priorkeycount = len(dedrmprefs['adeptkeys'])
    adeptfilekeys = getConfigFiles('.der','hex')
    for keypair in adeptfilekeys:
        name = keypair[0]
        value = keypair[1]
        dedrmprefs.addnamedvaluetoprefs('adeptkeys', name, value)
    addedkeycount = len(dedrmprefs['adeptkeys'])-priorkeycount
    if addedkeycount > 0:
        print u"{0} v{1}: {2:d} Adobe Adept {3} imported from config folder.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"keyfile" if addedkeycount==1 else u"keyfiles")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get ignoble json prefs
    if 'keys' in ignobleprefs:
        priorkeycount = len(dedrmprefs['bandnkeys'])
        for name in ignobleprefs['keys']:
            value = ignobleprefs['keys'][name]
            dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
        addedkeycount = len(dedrmprefs['bandnkeys']) - priorkeycount
        # no need to delete old prefs, since they contain no recoverable private data
        if addedkeycount > 0:
            print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from Ignoble plugin preferences.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get kindle json prefs
    priorpidcount = len(dedrmprefs['pids'])
    priorserialcount = len(dedrmprefs['serials'])
    if 'pids' in kindleprefs:
        pids, serials = parseKindleString(kindleprefs['pids'])
        for pid in pids:
            dedrmprefs.addvaluetoprefs('pids',pid)
    if 'serials' in kindleprefs:
        pids, serials = parseKindleString(kindleprefs['serials'])
        for serial in serials:
            dedrmprefs.addvaluetoprefs('serials',serial)
    addedpidcount = len(dedrmprefs['pids']) - priorpidcount
    if addedpidcount > 0:
        print u"{0} v{1}: {2:d} {3} imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, addedpidcount, u"PID" if addedpidcount==1 else u"PIDs")
    addedserialcount = len(dedrmprefs['serials']) - priorserialcount
    if addedserialcount > 0:
        print u"{0} v{1}: {2:d} {3} imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, addedserialcount, u"serial number" if addedserialcount==1 else u"serial numbers")
    try:
        if 'wineprefix' in kindleprefs and kindleprefs['wineprefix'] != "":
            dedrmprefs.set('adobewineprefix',kindleprefs['wineprefix'])
            dedrmprefs.set('kindlewineprefix',kindleprefs['wineprefix'])
            print u"{0} v{1}: WINEPREFIX ‘{2}’ imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, kindleprefs['wineprefix'])
    except:
        traceback.print_exc()

    # Make the json write all the prefs to disk
    dedrmprefs.writeprefs()
    print u"{0} v{1}: Finished setting up configuration data.".format(PLUGIN_NAME, PLUGIN_VERSION)
@@ -0,0 +1,292 @@
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai

from __future__ import with_statement
__license__ = 'GPL v3'

# Standard Python modules.
import os, sys, re, hashlib
import json
import traceback

from calibre.utils.config import dynamic, config_dir, JSONConfig
from calibre_plugins.dedrm.__init__ import PLUGIN_NAME, PLUGIN_VERSION
from calibre.constants import iswindows, isosx

class DeDRM_Prefs():
    def __init__(self):
        JSON_PATH = os.path.join(u"plugins", PLUGIN_NAME.strip().lower().replace(' ', '_') + '.json')
        self.dedrmprefs = JSONConfig(JSON_PATH)

        self.dedrmprefs.defaults['configured'] = False
        self.dedrmprefs.defaults['bandnkeys'] = {}
        self.dedrmprefs.defaults['adeptkeys'] = {}
        self.dedrmprefs.defaults['ereaderkeys'] = {}
        self.dedrmprefs.defaults['kindlekeys'] = {}
        self.dedrmprefs.defaults['pids'] = []
        self.dedrmprefs.defaults['serials'] = []
        self.dedrmprefs.defaults['adobewineprefix'] = ""
        self.dedrmprefs.defaults['kindlewineprefix'] = ""

        # initialise
        # we must actually set the prefs that are dictionaries and lists
        # to empty dictionaries and lists, otherwise we are unable to add to them
        # as then it just adds to the (memory only) dedrmprefs.defaults versions!
        if self.dedrmprefs['bandnkeys'] == {}:
            self.dedrmprefs['bandnkeys'] = {}
        if self.dedrmprefs['adeptkeys'] == {}:
            self.dedrmprefs['adeptkeys'] = {}
        if self.dedrmprefs['ereaderkeys'] == {}:
            self.dedrmprefs['ereaderkeys'] = {}
        if self.dedrmprefs['kindlekeys'] == {}:
            self.dedrmprefs['kindlekeys'] = {}
        if self.dedrmprefs['pids'] == []:
            self.dedrmprefs['pids'] = []
        if self.dedrmprefs['serials'] == []:
            self.dedrmprefs['serials'] = []

    def __getitem__(self, kind = None):
        if kind is not None:
            return self.dedrmprefs[kind]
        return self.dedrmprefs

    def set(self, kind, value):
        self.dedrmprefs[kind] = value

    def writeprefs(self, value = True):
        self.dedrmprefs['configured'] = value

    def addnamedvaluetoprefs(self, prefkind, keyname, keyvalue):
        try:
            if keyvalue not in self.dedrmprefs[prefkind].values():
                # ensure that the keyname is unique
                # by adding a number (starting with 2) to the name if it is not
                namecount = 1
                newname = keyname
                while newname in self.dedrmprefs[prefkind]:
                    namecount += 1
                    newname = "{0:s}_{1:d}".format(keyname,namecount)
                # add to the preferences
                self.dedrmprefs[prefkind][newname] = keyvalue
                return (True, newname)
        except:
            traceback.print_exc()
        return (False, keyname)
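The collision handling above only renames when the same keyname already exists with a different value. A minimal standalone rehearsal of that loop (a plain dict stands in for the JSONConfig-backed store; the names and values are invented):

    prefs = {'alice_1234': 'key-a'}
    keyname, keyvalue = 'alice_1234', 'key-b'
    namecount = 1
    newname = keyname
    while newname in prefs:
        namecount += 1
        newname = "{0:s}_{1:d}".format(keyname, namecount)
    prefs[newname] = keyvalue    # stored under 'alice_1234_2'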
    def addvaluetoprefs(self, prefkind, prefsvalue):
        # ensure the value isn't already in the preferences
        try:
            if prefsvalue not in self.dedrmprefs[prefkind]:
                self.dedrmprefs[prefkind].append(prefsvalue)
            return True
        except:
            traceback.print_exc()
        return False


def convertprefs(always = False):

    def parseIgnobleString(keystuff):
        from calibre_plugins.dedrm.ignoblekeygen import generate_key
        userkeys = []
        ar = keystuff.split(':')
        for keystring in ar:
            try:
                name, ccn = keystring.split(',')
                # Generate Barnes & Noble EPUB user key from name and credit card number.
                keyname = u"{0}_{1}".format(name.strip(),ccn.strip()[-4:])
                keyvalue = generate_key(name, ccn)
                userkeys.append([keyname,keyvalue])
            except Exception, e:
                traceback.print_exc()
                print e.args[0]
        return userkeys

    def parseeReaderString(keystuff):
        from calibre_plugins.dedrm.erdr2pml import getuser_key
        userkeys = []
        ar = keystuff.split(':')
        for keystring in ar:
            try:
                name, cc = keystring.split(',')
                # Generate eReader user key from name and credit card number.
                keyname = u"{0}_{1}".format(name.strip(),cc.strip()[-4:])
                keyvalue = getuser_key(name,cc).encode('hex')
                userkeys.append([keyname,keyvalue])
            except Exception, e:
                traceback.print_exc()
                print e.args[0]
        return userkeys

    def parseKindleString(keystuff):
        pids = []
        serials = []
        ar = keystuff.split(',')
        for keystring in ar:
            keystring = str(keystring).strip().replace(" ","")
            if (len(keystring) == 10 or len(keystring) == 8) and keystring not in pids:
                pids.append(keystring)
            elif len(keystring) == 16 and keystring[0] == 'B' and keystring not in serials:
                serials.append(keystring)
        return (pids,serials)
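parseKindleString sorts each comma-separated token purely by shape: 8- or 10-character tokens are taken as PIDs, 16-character tokens beginning with 'B' as Kindle serial numbers, and anything else is dropped. If the parser were lifted to module level, a call would look like this (the sample values are invented):

    pids, serials = parseKindleString(u"A1B2C3D4E5, Q9W8E7R6, B001ABCD12345678")
    # pids    -> ['A1B2C3D4E5', 'Q9W8E7R6']
    # serials -> ['B001ABCD12345678']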
    def getConfigFiles(extension, encoding = None):
        # get any files with extension 'extension' in the config dir
        userkeys = []
        files = [f for f in os.listdir(config_dir) if f.endswith(extension)]
        for filename in files:
            try:
                fpath = os.path.join(config_dir, filename)
                key = os.path.splitext(filename)[0]
                value = open(fpath, 'rb').read()
                if encoding is not None:
                    value = value.encode(encoding)
                userkeys.append([key,value])
            except:
                traceback.print_exc()
        return userkeys

    dedrmprefs = DeDRM_Prefs()

    if (not always) and dedrmprefs['configured']:
        # We've already converted old preferences,
        # and we're not being forced to do it again, so just return
        return

    print u"{0} v{1}: Importing configuration data from old DeDRM plugins".format(PLUGIN_NAME, PLUGIN_VERSION)

    IGNOBLEPLUGINNAME = "Ignoble Epub DeDRM"
    EREADERPLUGINNAME = "eReader PDB 2 PML"
    OLDKINDLEPLUGINNAME = "K4PC, K4Mac, Kindle Mobi and Topaz DeDRM"

    # get prefs from older tools
    kindleprefs = JSONConfig(os.path.join(u"plugins", u"K4MobiDeDRM"))
    ignobleprefs = JSONConfig(os.path.join(u"plugins", u"ignoble_epub_dedrm"))

    # Handle the old ignoble plugin's customization string by converting the
    # old string to stored keys... get that personal data out of plain sight.
    from calibre.customize.ui import config
    sc = config['plugin_customization']
    val = sc.pop(IGNOBLEPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old Ignoble plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorkeycount = len(dedrmprefs['bandnkeys'])
        userkeys = parseIgnobleString(str(val))
        for keypair in userkeys:
            name = keypair[0]
            value = keypair[1]
            dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
        addedkeycount = len(dedrmprefs['bandnkeys'])-priorkeycount
        print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from old Ignoble plugin configuration string".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # Handle the old eReader plugin's customization string by converting the
    # old string to stored keys... get that personal data out of plain sight.
    val = sc.pop(EREADERPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old eReader plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorkeycount = len(dedrmprefs['ereaderkeys'])
        userkeys = parseeReaderString(str(val))
        for keypair in userkeys:
            name = keypair[0]
            value = keypair[1]
            dedrmprefs.addnamedvaluetoprefs('ereaderkeys', name, value)
        addedkeycount = len(dedrmprefs['ereaderkeys'])-priorkeycount
        print u"{0} v{1}: {2:d} eReader {3} imported from old eReader plugin configuration string".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get old Kindle plugin configuration string
    val = sc.pop(OLDKINDLEPLUGINNAME, None)
    if val is not None:
        print u"{0} v{1}: Converting old Kindle plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION)
        priorpidcount = len(dedrmprefs['pids'])
        priorserialcount = len(dedrmprefs['serials'])
        pids, serials = parseKindleString(val)
        for pid in pids:
            dedrmprefs.addvaluetoprefs('pids',pid)
        for serial in serials:
            dedrmprefs.addvaluetoprefs('serials',serial)
        addedpidcount = len(dedrmprefs['pids']) - priorpidcount
        addedserialcount = len(dedrmprefs['serials']) - priorserialcount
        print u"{0} v{1}: {2:d} {3} and {4:d} {5} imported from old Kindle plugin configuration string.".format(PLUGIN_NAME, PLUGIN_VERSION, addedpidcount, u"PID" if addedpidcount==1 else u"PIDs", addedserialcount, u"serial number" if addedserialcount==1 else u"serial numbers")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # copy the customisations back into calibre preferences, as we've now removed the nasty plaintext
    config['plugin_customization'] = sc

    # get any .b64 files in the config dir
    priorkeycount = len(dedrmprefs['bandnkeys'])
    bandnfilekeys = getConfigFiles('.b64')
    for keypair in bandnfilekeys:
        name = keypair[0]
        value = keypair[1]
        dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
    addedkeycount = len(dedrmprefs['bandnkeys'])-priorkeycount
    if addedkeycount > 0:
        print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from config folder.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key file" if addedkeycount==1 else u"key files")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get any .der files in the config dir
    priorkeycount = len(dedrmprefs['adeptkeys'])
    adeptfilekeys = getConfigFiles('.der','hex')
    for keypair in adeptfilekeys:
        name = keypair[0]
        value = keypair[1]
        dedrmprefs.addnamedvaluetoprefs('adeptkeys', name, value)
    addedkeycount = len(dedrmprefs['adeptkeys'])-priorkeycount
    if addedkeycount > 0:
        print u"{0} v{1}: {2:d} Adobe Adept {3} imported from config folder.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"keyfile" if addedkeycount==1 else u"keyfiles")
        # Make the json write all the prefs to disk
        dedrmprefs.writeprefs(False)

    # get ignoble json prefs
    if 'keys' in ignobleprefs:
        priorkeycount = len(dedrmprefs['bandnkeys'])
        for name in ignobleprefs['keys']:
            value = ignobleprefs['keys'][name]
            dedrmprefs.addnamedvaluetoprefs('bandnkeys', name, value)
        addedkeycount = len(dedrmprefs['bandnkeys']) - priorkeycount
        # no need to delete old prefs, since they contain no recoverable private data
        if addedkeycount > 0:
            print u"{0} v{1}: {2:d} Barnes and Noble {3} imported from Ignoble plugin preferences.".format(PLUGIN_NAME, PLUGIN_VERSION, addedkeycount, u"key" if addedkeycount==1 else u"keys")
            # Make the json write all the prefs to disk
            dedrmprefs.writeprefs(False)

    # get kindle json prefs
    priorpidcount = len(dedrmprefs['pids'])
    priorserialcount = len(dedrmprefs['serials'])
    if 'pids' in kindleprefs:
        pids, serials = parseKindleString(kindleprefs['pids'])
        for pid in pids:
            dedrmprefs.addvaluetoprefs('pids',pid)
    if 'serials' in kindleprefs:
        pids, serials = parseKindleString(kindleprefs['serials'])
        for serial in serials:
            dedrmprefs.addvaluetoprefs('serials',serial)
    addedpidcount = len(dedrmprefs['pids']) - priorpidcount
    if addedpidcount > 0:
        print u"{0} v{1}: {2:d} {3} imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, addedpidcount, u"PID" if addedpidcount==1 else u"PIDs")
    addedserialcount = len(dedrmprefs['serials']) - priorserialcount
    if addedserialcount > 0:
        print u"{0} v{1}: {2:d} {3} imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, addedserialcount, u"serial number" if addedserialcount==1 else u"serial numbers")
    try:
        if 'wineprefix' in kindleprefs and kindleprefs['wineprefix'] != "":
            dedrmprefs.set('adobewineprefix',kindleprefs['wineprefix'])
            dedrmprefs.set('kindlewineprefix',kindleprefs['wineprefix'])
            print u"{0} v{1}: WINEPREFIX '{2}' imported from Kindle plugin preferences".format(PLUGIN_NAME, PLUGIN_VERSION, kindleprefs['wineprefix'])
    except:
        traceback.print_exc()

    # Make the json write all the prefs to disk
    dedrmprefs.writeprefs()
    print u"{0} v{1}: Finished setting up configuration data.".format(PLUGIN_NAME, PLUGIN_VERSION)
@@ -178,7 +178,12 @@ class DocParser(object):
             if val == "":
                 val = 0

-            if not ((attr == 'hang') and (int(val) == 0)) :
+            if not ((attr == 'hang') and (int(val) == 0)):
+                try:
+                    f = float(val)
+                except:
+                    print "Warning: unrecognised val, ignoring"
+                    val = 0
                 pv = float(val)/scale
                 cssargs[attr] = (self.attr_val_map[attr], pv)
                 keep = True
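The added try/except simply probes whether val will survive the float(val)/scale below, falling back to zero instead of raising. A tiny standalone illustration of the guard (the sample values are invented):

    for val in ("1.5", "12pt"):
        try:
            f = float(val)
        except:
            print "Warning: unrecognised val, ignoring"
            val = 0
        print val    # prints 1.5, then 0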
@@ -0,0 +1,148 @@
#!/usr/bin/env python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

import os, sys
import signal
import threading
import subprocess
from subprocess import Popen, PIPE, STDOUT

# **heavily** chopped up and modified version of asyncproc.py
# to make it actually work on Windows as well as Mac/Linux
# For the original see:
# "http://www.lysator.liu.se/~bellman/download/"
# author is "Thomas Bellman <bellman@lysator.liu.se>"
# available under GPL version 3 or Later

# create an asynchronous subprocess whose output can be collected in
# a non-blocking manner

# What a mess! Have to use threads just to get non-blocking io
# in a cross-platform manner

# luckily all thread use is hidden within this class

class Process(object):
    def __init__(self, *params, **kwparams):
        if len(params) <= 3:
            kwparams.setdefault('stdin', subprocess.PIPE)
        if len(params) <= 4:
            kwparams.setdefault('stdout', subprocess.PIPE)
        if len(params) <= 5:
            kwparams.setdefault('stderr', subprocess.PIPE)
        self.__pending_input = []
        self.__collected_outdata = []
        self.__collected_errdata = []
        self.__exitstatus = None
        self.__lock = threading.Lock()
        self.__inputsem = threading.Semaphore(0)
        self.__quit = False

        self.__process = subprocess.Popen(*params, **kwparams)

        if self.__process.stdin:
            self.__stdin_thread = threading.Thread(
                name="stdin-thread",
                target=self.__feeder, args=(self.__pending_input,
                                            self.__process.stdin))
            self.__stdin_thread.setDaemon(True)
            self.__stdin_thread.start()

        if self.__process.stdout:
            self.__stdout_thread = threading.Thread(
                name="stdout-thread",
                target=self.__reader, args=(self.__collected_outdata,
                                            self.__process.stdout))
            self.__stdout_thread.setDaemon(True)
            self.__stdout_thread.start()

        if self.__process.stderr:
            self.__stderr_thread = threading.Thread(
                name="stderr-thread",
                target=self.__reader, args=(self.__collected_errdata,
                                            self.__process.stderr))
            self.__stderr_thread.setDaemon(True)
            self.__stderr_thread.start()

    def pid(self):
        return self.__process.pid

    def kill(self, signal):
        self.__process.send_signal(signal)

    # check on subprocess (pass in 'nowait') to act like poll
    def wait(self, flag):
        if flag.lower() == 'nowait':
            rc = self.__process.poll()
        else:
            rc = self.__process.wait()
        if rc is not None:
            if self.__process.stdin:
                self.closeinput()
            if self.__process.stdout:
                self.__stdout_thread.join()
            if self.__process.stderr:
                self.__stderr_thread.join()
        return self.__process.returncode

    def terminate(self):
        if self.__process.stdin:
            self.closeinput()
        self.__process.terminate()

    # thread gets data from subprocess stdout
    def __reader(self, collector, source):
        while True:
            data = os.read(source.fileno(), 65536)
            self.__lock.acquire()
            collector.append(data)
            self.__lock.release()
            if data == "":
                source.close()
                break
        return

    # thread feeds data to subprocess stdin
    def __feeder(self, pending, drain):
        while True:
            self.__inputsem.acquire()
            self.__lock.acquire()
            if not pending and self.__quit:
                drain.close()
                self.__lock.release()
                break
            data = pending.pop(0)
            self.__lock.release()
            drain.write(data)

    # non-blocking read of data from subprocess stdout
    def read(self):
        self.__lock.acquire()
        outdata = "".join(self.__collected_outdata)
        del self.__collected_outdata[:]
        self.__lock.release()
        return outdata

    # non-blocking read of data from subprocess stderr
    def readerr(self):
        self.__lock.acquire()
        errdata = "".join(self.__collected_errdata)
        del self.__collected_errdata[:]
        self.__lock.release()
        return errdata

    # non-blocking write to stdin of subprocess
    def write(self, data):
        if self.__process.stdin is None:
            raise ValueError("Writing to process with stdin not a pipe")
        self.__lock.acquire()
        self.__pending_input.append(data)
        self.__inputsem.release()
        self.__lock.release()

    # close stdin of subprocess
    def closeinput(self):
        self.__lock.acquire()
        self.__quit = True
        self.__inputsem.release()
        self.__lock.release()
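A short usage sketch for the Process wrapper above, polling a finished child without blocking (assumes a POSIX echo on the PATH; purely illustrative):

    import time

    p = Process(["echo", "hello"])
    while p.wait('nowait') is None:    # poll; returns the returncode once done
        time.sleep(0.1)
    print p.read(),                    # collected stdout: 'hello\n'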
@@ -1,148 +1,538 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# topazextract.py
# Mostly written by some_updates based on code from many others

# Changelog
# 4.9 - moved unicode_argv call inside main for Windows DeDRM compatibility
# 5.0 - Fixed potential unicode problem with command line interface

__version__ = '5.0'

import sys
import os, csv, getopt
import zlib, zipfile, tempfile, shutil
import traceback
from struct import pack
from struct import unpack
from alfcrypto import Topaz_Cipher

class SafeUnbuffered:
    def __init__(self, stream):
        self.stream = stream
        self.encoding = stream.encoding
        if self.encoding == None:
            self.encoding = "utf-8"
    def write(self, data):
        if isinstance(data,unicode):
            data = data.encode(self.encoding,"replace")
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)

iswindows = sys.platform.startswith('win')
isosx = sys.platform.startswith('darwin')

def unicode_argv():
    if iswindows:
        # Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
        # strings.

        # Versions 2.x of Python don't support Unicode in sys.argv on
        # Windows, with the underlying Windows API instead replacing multi-byte
        # characters with '?'.

        from ctypes import POINTER, byref, cdll, c_int, windll
        from ctypes.wintypes import LPCWSTR, LPWSTR

        GetCommandLineW = cdll.kernel32.GetCommandLineW
        GetCommandLineW.argtypes = []
        GetCommandLineW.restype = LPCWSTR

        CommandLineToArgvW = windll.shell32.CommandLineToArgvW
        CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
        CommandLineToArgvW.restype = POINTER(LPWSTR)

        cmd = GetCommandLineW()
        argc = c_int(0)
        argv = CommandLineToArgvW(cmd, byref(argc))
        if argc.value > 0:
            # Remove Python executable and commands if present
            start = argc.value - len(sys.argv)
            return [argv[i] for i in
                    xrange(start, argc.value)]
        # if we don't have any arguments at all, just pass back script name
        # this should never happen
        return [u"mobidedrm.py"]
    else:
        argvencoding = sys.stdin.encoding
        if argvencoding == None:
            argvencoding = 'utf-8'
        return [arg if (type(arg) == unicode) else unicode(arg,argvencoding) for arg in sys.argv]

#global switch
debug = False

if 'calibre' in sys.modules:
    inCalibre = True
    from calibre_plugins.dedrm import kgenpids
else:
    inCalibre = False
    import kgenpids


class DrmException(Exception):
    pass


# recursive zip creation support routine
def zipUpDir(myzip, tdir, localname):
    currentdir = tdir
    if localname != u"":
        currentdir = os.path.join(currentdir,localname)
    list = os.listdir(currentdir)
    for file in list:
        afilename = file
        localfilePath = os.path.join(localname, afilename)
        realfilePath = os.path.join(currentdir,file)
        if os.path.isfile(realfilePath):
            myzip.write(realfilePath, localfilePath)
        elif os.path.isdir(realfilePath):
            zipUpDir(myzip, tdir, localfilePath)

#
# Utility routines
#

# Get a 7 bit encoded number from file
def bookReadEncodedNumber(fo):
    flag = False
    data = ord(fo.read(1))
    if data == 0xFF:
        flag = True
        data = ord(fo.read(1))
    if data >= 0x80:
        datax = (data & 0x7F)
        while data >= 0x80:
            data = ord(fo.read(1))
            datax = (datax << 7) + (data & 0x7F)
        data = datax
    if flag:
        data = -data
    return data

# Get a length prefixed string from file
def bookReadString(fo):
    stringLength = bookReadEncodedNumber(fo)
    return unpack(str(stringLength)+'s',fo.read(stringLength))[0]
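The encoding consumed by bookReadEncodedNumber is a big-endian base-128 varint: the top bit of each byte flags a continuation, the low seven bits carry data, and a leading 0xFF byte marks the value as negative. A worked decode from an in-memory buffer:

    from StringIO import StringIO

    # 0x81 0x05 -> (1 << 7) + 5 = 133; a leading 0xFF negates the result.
    print bookReadEncodedNumber(StringIO('\x81\x05'))        # 133
    print bookReadEncodedNumber(StringIO('\xff\x81\x05'))    # -133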
#
# crypto routines
#

# Context initialisation for the Topaz Crypto
def topazCryptoInit(key):
    return Topaz_Cipher().ctx_init(key)

    # Pure-Python reference, kept for documentation:
    # ctx1 = 0x0CAFFE19E
    # for keyChar in key:
    #     keyByte = ord(keyChar)
    #     ctx2 = ctx1
    #     ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
    # return [ctx1,ctx2]

# decrypt data with the context prepared by topazCryptoInit()
def topazCryptoDecrypt(data, ctx):
    return Topaz_Cipher().decrypt(data, ctx)

    # Pure-Python reference, kept for documentation:
    # ctx1 = ctx[0]
    # ctx2 = ctx[1]
    # plainText = ""
    # for dataChar in data:
    #     dataByte = ord(dataChar)
    #     m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
    #     ctx2 = ctx1
    #     ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
    #     plainText += chr(m)
    # return plainText
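The commented-out reference above is complete enough to run on its own; a direct transcription (assumed, not verified here, to match the compiled Topaz_Cipher byte for byte) is handy for tracing the keystream by hand:

    def topazCryptoDecryptReference(data, key):
        # key schedule: mix each key byte into a 32-bit rolling context
        ctx1 = 0x0CAFFE19E
        for keyChar in key:
            keyByte = ord(keyChar)
            ctx2 = ctx1
            ctx1 = ((((ctx1 >> 2) * (ctx1 >> 7)) & 0xFFFFFFFF) ^ ((keyByte * keyByte * 0x0F902007) & 0xFFFFFFFF))
        # decrypt: each recovered plaintext byte feeds back into the context
        plainText = ""
        for dataChar in data:
            dataByte = ord(dataChar)
            m = (dataByte ^ ((ctx1 >> 3) & 0xFF) ^ ((ctx2 << 3) & 0xFF)) & 0xFF
            ctx2 = ctx1
            ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) & 0xFFFFFFFF) ^ ((m * m * 0x0F902007) & 0xFFFFFFFF)
            plainText += chr(m)
        return plainText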
# Decrypt data with the PID
def decryptRecord(data,PID):
    ctx = topazCryptoInit(PID)
    return topazCryptoDecrypt(data, ctx)

# Try to decrypt a dkey record (contains the bookPID)
def decryptDkeyRecord(data,PID):
    record = decryptRecord(data,PID)
    fields = unpack('3sB8sB8s3s',record)
    if fields[0] != 'PID' or fields[5] != 'pid':
        raise DrmException(u"Didn't find PID magic numbers in record")
    elif fields[1] != 8 or fields[3] != 8:
        raise DrmException(u"Record didn't contain correct length fields")
    elif fields[2] != PID:
        raise DrmException(u"Record didn't contain PID")
    return fields[4]
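decryptDkeyRecord expects the decrypted record to be exactly 24 bytes: the tag 'PID', a length byte (8), the 8-byte PID, a second length byte (8), the 8-byte book key, and the trailing tag 'pid'. A sketch that packs and re-parses such a plaintext record (the PID and key bytes are invented):

    from struct import pack, unpack

    record = pack('3sB8sB8s3s', 'PID', 8, 'A1B2C3D4', 8, '\x01' * 8, 'pid')
    fields = unpack('3sB8sB8s3s', record)
    print fields[0], fields[2], fields[5]    # PID A1B2C3D4 pid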
# Decrypt all dkey records (contain the book PID)
def decryptDkeyRecords(data,PID):
    nbKeyRecords = ord(data[0])
    records = []
    data = data[1:]
    for i in range (0,nbKeyRecords):
        length = ord(data[0])
        try:
            key = decryptDkeyRecord(data[1:length+1],PID)
            records.append(key)
        except DrmException:
            pass
        data = data[1+length:]
    if len(records) == 0:
        raise DrmException(u"BookKey Not Found")
    return records


class TopazBook:
    def __init__(self, filename):
        self.fo = file(filename, 'rb')
        self.outdir = tempfile.mkdtemp()
        # self.outdir = 'rawdat'
        self.bookPayloadOffset = 0
        self.bookHeaderRecords = {}
        self.bookMetadata = {}
        self.bookKey = None
        magic = unpack('4s',self.fo.read(4))[0]
        if magic != 'TPZ0':
            raise DrmException(u"Parse Error : Invalid Header, not a Topaz file")
        self.parseTopazHeaders()
        self.parseMetadata()

    def parseTopazHeaders(self):
        def bookReadHeaderRecordData():
            # Read and return the data of one header record at the current book file position
            # [[offset,decompressedLength,compressedLength],...]
            nbValues = bookReadEncodedNumber(self.fo)
            if debug: print "%d records in header " % nbValues,
            values = []
            for i in range (0,nbValues):
                values.append([bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo)])
            return values
        def parseTopazHeaderRecord():
            # Read and parse one header record at the current book file position and return the associated data
            # [[offset,decompressedLength,compressedLength],...]
            if ord(self.fo.read(1)) != 0x63:
                raise DrmException(u"Parse Error : Invalid Header")
            tag = bookReadString(self.fo)
            record = bookReadHeaderRecordData()
            return [tag,record]
        nbRecords = bookReadEncodedNumber(self.fo)
        if debug: print "Headers: %d" % nbRecords
        for i in range (0,nbRecords):
            result = parseTopazHeaderRecord()
            if debug: print result[0], ": ", result[1]
            self.bookHeaderRecords[result[0]] = result[1]
        if ord(self.fo.read(1)) != 0x64:
            raise DrmException(u"Parse Error : Invalid Header")
        self.bookPayloadOffset = self.fo.tell()

    def parseMetadata(self):
        # Parse the metadata record from the book payload and return a list of [key,values]
        self.fo.seek(self.bookPayloadOffset + self.bookHeaderRecords['metadata'][0][0])
        tag = bookReadString(self.fo)
        if tag != 'metadata':
            raise DrmException(u"Parse Error : Record Names Don't Match")
        flags = ord(self.fo.read(1))
        nbRecords = ord(self.fo.read(1))
        if debug: print "Metadata Records: %d" % nbRecords
        for i in range (0,nbRecords):
            keyval = bookReadString(self.fo)
            content = bookReadString(self.fo)
            if debug: print keyval
            if debug: print content
            self.bookMetadata[keyval] = content
        return self.bookMetadata

    def getPIDMetaInfo(self):
        keysRecord = self.bookMetadata.get('keys','')
        keysRecordRecord = ''
        if keysRecord != '':
            keylst = keysRecord.split(',')
            for keyval in keylst:
                keysRecordRecord += self.bookMetadata.get(keyval,'')
        return keysRecord, keysRecordRecord

    def getBookTitle(self):
        title = ''
        if 'Title' in self.bookMetadata:
            title = self.bookMetadata['Title']
        return title.decode('utf-8')

    def setBookKey(self, key):
        self.bookKey = key

    def getBookPayloadRecord(self, name, index):
        # Get a record in the book payload, given its name and index,
        # decrypted and decompressed if necessary
        encrypted = False
        compressed = False
        try:
            recordOffset = self.bookHeaderRecords[name][index][0]
        except:
            raise DrmException("Parse Error : Invalid Record, record not found")

        self.fo.seek(self.bookPayloadOffset + recordOffset)

        tag = bookReadString(self.fo)
        if tag != name:
            raise DrmException("Parse Error : Invalid Record, record name doesn't match")

        recordIndex = bookReadEncodedNumber(self.fo)
        if recordIndex < 0:
            encrypted = True
            recordIndex = -recordIndex - 1

        if recordIndex != index:
            raise DrmException("Parse Error : Invalid Record, index doesn't match")

        if (self.bookHeaderRecords[name][index][2] > 0):
            compressed = True
            record = self.fo.read(self.bookHeaderRecords[name][index][2])
        else:
            record = self.fo.read(self.bookHeaderRecords[name][index][1])

        if encrypted:
            if self.bookKey:
                ctx = topazCryptoInit(self.bookKey)
                record = topazCryptoDecrypt(record,ctx)
            else:
                raise DrmException("Error: Attempt to decrypt without bookKey")

        if compressed:
            record = zlib.decompress(record)

        return record

    def processBook(self, pidlst):
        raw = 0
        fixedimage=True
        try:
            keydata = self.getBookPayloadRecord('dkey', 0)
        except DrmException, e:
            print u"no dkey record found, book may not be encrypted"
            print u"attempting to extract files without a book key"
            self.createBookDirectory()
            self.extractFiles()
            print u"Successfully Extracted Topaz contents"
            if inCalibre:
                from calibre_plugins.dedrm import genbook
            else:
                import genbook

            rv = genbook.generateBook(self.outdir, raw, fixedimage)
            if rv == 0:
                print u"Book Successfully generated."
            return rv

        # try each pid to decode the file
        bookKey = None
        for pid in pidlst:
            # use 8 digit pids here
            pid = pid[0:8]
            print u"Trying: {0}".format(pid)
            bookKeys = []
            data = keydata
            try:
                bookKeys+=decryptDkeyRecords(data,pid)
            except DrmException, e:
                pass
            else:
                bookKey = bookKeys[0]
                print u"Book Key Found! ({0})".format(bookKey.encode('hex'))
                break

        if not bookKey:
            raise DrmException(u"No key found in {0:d} keys tried. Read the FAQs at Alf's blog: http://apprenticealf.wordpress.com/".format(len(pidlst)))

        self.setBookKey(bookKey)
        self.createBookDirectory()
        self.extractFiles()
        print u"Successfully Extracted Topaz contents"
        if inCalibre:
            from calibre_plugins.dedrm import genbook
        else:
            import genbook

        rv = genbook.generateBook(self.outdir, raw, fixedimage)
        if rv == 0:
            print u"Book Successfully generated"
        return rv

    def createBookDirectory(self):
        outdir = self.outdir
        # create output directory structure
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        destdir = os.path.join(outdir,u"img")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"color_img")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"page")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"glyphs")
        if not os.path.exists(destdir):
            os.makedirs(destdir)

    def extractFiles(self):
        outdir = self.outdir
        for headerRecord in self.bookHeaderRecords:
            name = headerRecord
            if name != 'dkey':
                ext = u".dat"
                if name == 'img': ext = u".jpg"
                if name == 'color': ext = u".jpg"
                print u"Processing Section: {0}\n. . .".format(name),
                for index in range (0,len(self.bookHeaderRecords[name])):
                    fname = u"{0}{1:04d}{2}".format(name,index,ext)
                    destdir = outdir
                    if name == 'img':
                        destdir = os.path.join(outdir,u"img")
                    if name == 'color':
                        destdir = os.path.join(outdir,u"color_img")
                    if name == 'page':
                        destdir = os.path.join(outdir,u"page")
                    if name == 'glyphs':
                        destdir = os.path.join(outdir,u"glyphs")
                    outputFile = os.path.join(destdir,fname)
                    print u".",
                    record = self.getBookPayloadRecord(name,index)
                    if record != '':
                        file(outputFile, 'wb').write(record)
                print u" "

    def getFile(self, zipname):
        htmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
        htmlzip.write(os.path.join(self.outdir,u"book.html"),u"book.html")
        htmlzip.write(os.path.join(self.outdir,u"book.opf"),u"book.opf")
        if os.path.isfile(os.path.join(self.outdir,u"cover.jpg")):
            htmlzip.write(os.path.join(self.outdir,u"cover.jpg"),u"cover.jpg")
        htmlzip.write(os.path.join(self.outdir,u"style.css"),u"style.css")
        zipUpDir(htmlzip, self.outdir, u"img")
        htmlzip.close()

    def getBookType(self):
        return u"Topaz"

    def getBookExtension(self):
        return u".htmlz"

    def getSVGZip(self, zipname):
        svgzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
        svgzip.write(os.path.join(self.outdir,u"index_svg.xhtml"),u"index_svg.xhtml")
        zipUpDir(svgzip, self.outdir, u"svg")
        zipUpDir(svgzip, self.outdir, u"img")
        svgzip.close()

    def cleanup(self):
        if os.path.isdir(self.outdir):
            shutil.rmtree(self.outdir, True)

def usage(progname):
    print u"Removes DRM protection from Topaz ebooks and extracts the contents"
    print u"Usage:"
    print u"  {0} [-k <kindle.k4i>] [-p <comma separated PIDs>] [-s <comma separated Kindle serial numbers>] <infile> <outdir>".format(progname)

# Main
def cli_main():
    argv=unicode_argv()
    progname = os.path.basename(argv[0])
    print u"TopazExtract v{0}.".format(__version__)

    try:
        opts, args = getopt.getopt(argv[1:], "k:p:s:x")
    except getopt.GetoptError, err:
        print u"Error in options or arguments: {0}".format(err.args[0])
        usage(progname)
        return 1
    if len(args)<2:
        usage(progname)
        return 1

    infile = args[0]
    outdir = args[1]
    if not os.path.isfile(infile):
        print u"Input File {0} Does Not Exist.".format(infile)
        return 1

    if not os.path.exists(outdir):
        print u"Output Directory {0} Does Not Exist.".format(outdir)
        return 1

    kDatabaseFiles = []
    serials = []
    pids = []

    for o, a in opts:
        if o == '-k':
            if a == None:
                raise DrmException("Invalid parameter for -k")
            kDatabaseFiles.append(a)
        if o == '-p':
            if a == None:
                raise DrmException("Invalid parameter for -p")
            pids = a.split(',')
        if o == '-s':
            if a == None:
                raise DrmException("Invalid parameter for -s")
            serials = [serial.replace(" ","") for serial in a.split(',')]

    bookname = os.path.splitext(os.path.basename(infile))[0]

    tb = TopazBook(infile)
    title = tb.getBookTitle()
    print u"Processing Book: {0}".format(title)
    md1, md2 = tb.getPIDMetaInfo()
    pids.extend(kgenpids.getPidList(md1, md2, serials, kDatabaseFiles))

    try:
        print u"Decrypting Book"
        tb.processBook(pids)

        print u"  Creating HTML ZIP Archive"
        zipname = os.path.join(outdir, bookname + u"_nodrm.htmlz")
        tb.getFile(zipname)

        print u"  Creating SVG ZIP Archive"
        zipname = os.path.join(outdir, bookname + u"_SVG.zip")
        tb.getSVGZip(zipname)

        # removing internal temporary directory of pieces
        tb.cleanup()

    except DrmException, e:
        print u"Decryption failed\n{0}".format(traceback.format_exc())
        try:
            tb.cleanup()
        except:
            pass
        return 1

    except Exception, e:
        print u"Decryption failed\n{0}".format(traceback.format_exc())
        try:
            tb.cleanup()
        except:
            pass
        return 1

    return 0


if __name__ == '__main__':
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(cli_main())
@@ -1,538 +1,39 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import with_statement

__license__ = 'GPL v3'

DETAILED_MESSAGE = \
    'You have personal information stored in this plugin\'s customization '+ \
    'string from a previous version of this plugin.\n\n'+ \
    'This new version of the plugin can convert that info '+ \
    'into key data that the new plugin can then use (which doesn\'t '+ \
    'require personal information to be stored/displayed in an insecure '+ \
    'manner like the old plugin did).\n\nIf you choose NOT to migrate this data at this time '+ \
    'you will be prompted to save that personal data to a file elsewhere; and you\'ll have '+ \
    'to manually re-configure this plugin with your information.\n\nEither way... ' + \
    'this new version of the plugin will not be responsible for storing that personal '+ \
    'info in plain sight any longer.'

def uStrCmp (s1, s2, caseless=False):
    import unicodedata as ud
    str1 = s1 if isinstance(s1, unicode) else unicode(s1)
    str2 = s2 if isinstance(s2, unicode) else unicode(s2)
    if caseless:
        return ud.normalize('NFC', str1.lower()) == ud.normalize('NFC', str2.lower())
    else:
        return ud.normalize('NFC', str1) == ud.normalize('NFC', str2)
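uStrCmp compares after NFC normalization, so precomposed and decomposed spellings of the same character count as equal. A quick sketch:

    # u'\xe9' is a precomposed 'é'; u'e\u0301' is 'e' plus a combining acute.
    print uStrCmp(u'\xe9', u'e\u0301')                         # True
    print uStrCmp(u'Caf\xe9', u'CAFE\u0301', caseless=True)    # True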
def parseCustString(keystuff):
|
||||||
debug = False
|
userkeys = []
|
||||||
|
ar = keystuff.split(':')
|
||||||
if 'calibre' in sys.modules:
|
for i in ar:
|
||||||
inCalibre = True
|
|
||||||
from calibre_plugins.dedrm import kgenpids
|
|
||||||
else:
|
|
||||||
inCalibre = False
|
|
||||||
import kgenpids
|
|
||||||
|
|
||||||
|
|
||||||
class DrmException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
# recursive zip creation support routine
|
|
||||||
def zipUpDir(myzip, tdir, localname):
|
|
||||||
currentdir = tdir
|
|
||||||
if localname != u"":
|
|
||||||
currentdir = os.path.join(currentdir,localname)
|
|
||||||
list = os.listdir(currentdir)
|
|
||||||
for file in list:
|
|
||||||
afilename = file
|
|
||||||
localfilePath = os.path.join(localname, afilename)
|
|
||||||
realfilePath = os.path.join(currentdir,file)
|
|
||||||
if os.path.isfile(realfilePath):
|
|
||||||
myzip.write(realfilePath, localfilePath)
|
|
||||||
elif os.path.isdir(realfilePath):
|
|
||||||
zipUpDir(myzip, tdir, localfilePath)
|
|
||||||
|
|
||||||
#
|
|
||||||
# Utility routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# Get a 7 bit encoded number from file
|
|
||||||
def bookReadEncodedNumber(fo):
|
|
||||||
flag = False
|
|
||||||
data = ord(fo.read(1))
|
|
||||||
if data == 0xFF:
|
|
||||||
flag = True
|
|
||||||
data = ord(fo.read(1))
|
|
||||||
if data >= 0x80:
|
|
||||||
datax = (data & 0x7F)
|
|
||||||
while data >= 0x80 :
|
|
||||||
data = ord(fo.read(1))
|
|
||||||
datax = (datax <<7) + (data & 0x7F)
|
|
||||||
data = datax
|
|
||||||
if flag:
|
|
||||||
data = -data
|
|
||||||
return data
|
|
||||||
|
|
||||||
# Get a length prefixed string from file
|
|
||||||
def bookReadString(fo):
|
|
||||||
stringLength = bookReadEncodedNumber(fo)
|
|
||||||
return unpack(str(stringLength)+'s',fo.read(stringLength))[0]
|
|
||||||
|
|
||||||
#
|
|
||||||
# crypto routines
|
|
||||||
#
|
|
||||||
|
|
||||||
# Context initialisation for the Topaz Crypto
|
|
||||||
def topazCryptoInit(key):
|
|
||||||
return Topaz_Cipher().ctx_init(key)
|
|
||||||
|
|
||||||
# ctx1 = 0x0CAFFE19E
|
|
||||||
# for keyChar in key:
|
|
||||||
# keyByte = ord(keyChar)
|
|
||||||
# ctx2 = ctx1
|
|
||||||
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
|
|
||||||
# return [ctx1,ctx2]
|
|
||||||
|
|
||||||
# decrypt data with the context prepared by topazCryptoInit()
|
|
||||||
def topazCryptoDecrypt(data, ctx):
|
|
||||||
return Topaz_Cipher().decrypt(data, ctx)
|
|
||||||
# ctx1 = ctx[0]
|
|
||||||
# ctx2 = ctx[1]
|
|
||||||
# plainText = ""
|
|
||||||
# for dataChar in data:
|
|
||||||
# dataByte = ord(dataChar)
|
|
||||||
# m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
|
|
||||||
# ctx2 = ctx1
|
|
||||||
# ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
|
|
||||||
# plainText += chr(m)
|
|
||||||
# return plainText
|
|
||||||
|
|
||||||
# Decrypt data with the PID
|
|
||||||
def decryptRecord(data,PID):
|
|
||||||
ctx = topazCryptoInit(PID)
|
|
||||||
return topazCryptoDecrypt(data, ctx)
|
|
||||||
|
|
||||||
# Try to decrypt a dkey record (contains the bookPID)
|
|
||||||
def decryptDkeyRecord(data,PID):
|
|
||||||
record = decryptRecord(data,PID)
|
|
||||||
fields = unpack('3sB8sB8s3s',record)
|
|
||||||
if fields[0] != 'PID' or fields[5] != 'pid' :
|
|
||||||
raise DrmException(u"Didn't find PID magic numbers in record")
|
|
||||||
elif fields[1] != 8 or fields[3] != 8 :
|
|
||||||
raise DrmException(u"Record didn't contain correct length fields")
|
|
||||||
elif fields[2] != PID :
|
|
||||||
raise DrmException(u"Record didn't contain PID")
|
|
||||||
return fields[4]
|
|
||||||
|
|
||||||
# Decrypt all dkey records (contain the book PID)
|
|
||||||
def decryptDkeyRecords(data,PID):
|
|
||||||
nbKeyRecords = ord(data[0])
|
|
||||||
records = []
|
|
||||||
data = data[1:]
|
|
||||||
for i in range (0,nbKeyRecords):
|
|
||||||
length = ord(data[0])
|
|
||||||
try:
|
try:
|
||||||
key = decryptDkeyRecord(data[1:length+1],PID)
|
name, ccn = i.split(',')
|
||||||
records.append(key)
|
# Generate Barnes & Noble EPUB user key from name and credit card number.
|
||||||
except DrmException:
|
userkeys.append(generate_key(name, ccn))
|
||||||
pass
|
|
||||||
data = data[1+length:]
|
|
||||||
if len(records) == 0:
|
|
||||||
raise DrmException(u"BookKey Not Found")
|
|
||||||
return records
|
|
||||||
|
|
||||||
|
|
class TopazBook:
    def __init__(self, filename):
        self.fo = file(filename, 'rb')
        self.outdir = tempfile.mkdtemp()
        # self.outdir = 'rawdat'
        self.bookPayloadOffset = 0
        self.bookHeaderRecords = {}
        self.bookMetadata = {}
        self.bookKey = None
        magic = unpack('4s',self.fo.read(4))[0]
        if magic != 'TPZ0':
            raise DrmException(u"Parse Error : Invalid Header, not a Topaz file")
        self.parseTopazHeaders()
        self.parseMetadata()

    def parseTopazHeaders(self):
        def bookReadHeaderRecordData():
            # Read and return the data of one header record at the current book file position
            # [[offset,decompressedLength,compressedLength],...]
            nbValues = bookReadEncodedNumber(self.fo)
            if debug: print "%d records in header " % nbValues,
            values = []
            for i in range (0,nbValues):
                values.append([bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo)])
            return values
        def parseTopazHeaderRecord():
            # Read and parse one header record at the current book file position and return the associated data
            # [[offset,decompressedLength,compressedLength],...]
            if ord(self.fo.read(1)) != 0x63:
                raise DrmException(u"Parse Error : Invalid Header")
            tag = bookReadString(self.fo)
            record = bookReadHeaderRecordData()
            return [tag,record]
        nbRecords = bookReadEncodedNumber(self.fo)
        if debug: print "Headers: %d" % nbRecords
        for i in range (0,nbRecords):
            result = parseTopazHeaderRecord()
            if debug: print result[0], ": ", result[1]
            self.bookHeaderRecords[result[0]] = result[1]
        if ord(self.fo.read(1)) != 0x64 :
            raise DrmException(u"Parse Error : Invalid Header")
        self.bookPayloadOffset = self.fo.tell()
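    # Informal sketch of the container layout parseTopazHeaders() walks:
    #
    #   'TPZ0'                  4-byte magic
    #   <encoded number>        count of header records
    #   per header record:
    #       0x63                record marker
    #       <string>            tag, e.g. 'metadata', 'dkey', 'img', 'page'
    #       <encoded number>    triple count, then that many
    #       [offset, decompressedLength, compressedLength] triples
    #   0x64                    end-of-headers marker
    #   payload                 records addressed as bookPayloadOffset + offset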
    def parseMetadata(self):
        # Parse the metadata record from the book payload and return a list of [key,values]
        self.fo.seek(self.bookPayloadOffset + self.bookHeaderRecords['metadata'][0][0])
        tag = bookReadString(self.fo)
        if tag != 'metadata' :
            raise DrmException(u"Parse Error : Record Names Don't Match")
        flags = ord(self.fo.read(1))
        nbRecords = ord(self.fo.read(1))
        if debug: print "Metadata Records: %d" % nbRecords
        for i in range (0,nbRecords) :
            keyval = bookReadString(self.fo)
            content = bookReadString(self.fo)
            if debug: print keyval
            if debug: print content
            self.bookMetadata[keyval] = content
        return self.bookMetadata

    def getPIDMetaInfo(self):
        keysRecord = self.bookMetadata.get('keys','')
        keysRecordRecord = ''
        if keysRecord != '':
            keylst = keysRecord.split(',')
            for keyval in keylst:
                keysRecordRecord += self.bookMetadata.get(keyval,'')
        return keysRecord, keysRecordRecord

    def getBookTitle(self):
        title = ''
        if 'Title' in self.bookMetadata:
            title = self.bookMetadata['Title']
        return title.decode('utf-8')

    def setBookKey(self, key):
        self.bookKey = key
    def getBookPayloadRecord(self, name, index):
        # Get a record in the book payload, given its name and index.
        # decrypted and decompressed if necessary
        encrypted = False
        compressed = False
        try:
            recordOffset = self.bookHeaderRecords[name][index][0]
        except:
            raise DrmException("Parse Error : Invalid Record, record not found")

        self.fo.seek(self.bookPayloadOffset + recordOffset)

        tag = bookReadString(self.fo)
        if tag != name :
            raise DrmException("Parse Error : Invalid Record, record name doesn't match")

        recordIndex = bookReadEncodedNumber(self.fo)
        if recordIndex < 0 :
            encrypted = True
            recordIndex = -recordIndex -1

        if recordIndex != index :
            raise DrmException("Parse Error : Invalid Record, index doesn't match")

        if (self.bookHeaderRecords[name][index][2] > 0):
            compressed = True
            record = self.fo.read(self.bookHeaderRecords[name][index][2])
        else:
            record = self.fo.read(self.bookHeaderRecords[name][index][1])

        if encrypted:
            if self.bookKey:
                ctx = topazCryptoInit(self.bookKey)
                record = topazCryptoDecrypt(record,ctx)
            else :
                raise DrmException("Error: Attempt to decrypt without bookKey")

        if compressed:
            record = zlib.decompress(record)

        return record
    def processBook(self, pidlst):
        raw = 0
        fixedimage=True
        try:
            keydata = self.getBookPayloadRecord('dkey', 0)
        except DrmException, e:
            print u"no dkey record found, book may not be encrypted"
            print u"attempting to extract files without a book key"
            self.createBookDirectory()
            self.extractFiles()
            print u"Successfully Extracted Topaz contents"
            if inCalibre:
                from calibre_plugins.dedrm import genbook
            else:
                import genbook

            rv = genbook.generateBook(self.outdir, raw, fixedimage)
            if rv == 0:
                print u"Book Successfully generated."
            return rv

        # try each pid to decode the file
        bookKey = None
        for pid in pidlst:
            # use 8 digit pids here
            pid = pid[0:8]
            print u"Trying: {0}".format(pid)
            bookKeys = []
            data = keydata
            try:
                bookKeys+=decryptDkeyRecords(data,pid)
            except DrmException, e:
                pass
            else:
                bookKey = bookKeys[0]
                print u"Book Key Found! ({0})".format(bookKey.encode('hex'))
                break

        if not bookKey:
            raise DrmException(u"No key found in {0:d} keys tried. Read the FAQs at Alf's blog: http://apprenticealf.wordpress.com/".format(len(pidlst)))

        self.setBookKey(bookKey)
        self.createBookDirectory()
        self.extractFiles()
        print u"Successfully Extracted Topaz contents"
        if inCalibre:
            from calibre_plugins.dedrm import genbook
        else:
            import genbook

        rv = genbook.generateBook(self.outdir, raw, fixedimage)
        if rv == 0:
            print u"Book Successfully generated"
        return rv
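    # A hypothetical end-to-end driver for this class, mirroring cli_main()
    # below (file name and PID are made up):
    #
    #   tb = TopazBook(u"book.azw1")
    #   print u"Title: {0}".format(tb.getBookTitle())
    #   try:
    #       tb.processBook([u"A1B2C3D4"])
    #       tb.getFile(u"book_nodrm" + tb.getBookExtension())
    #   finally:
    #       tb.cleanup()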
    def createBookDirectory(self):
        outdir = self.outdir
        # create output directory structure
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        destdir = os.path.join(outdir,u"img")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"color_img")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"page")
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        destdir = os.path.join(outdir,u"glyphs")
        if not os.path.exists(destdir):
            os.makedirs(destdir)

    def extractFiles(self):
        outdir = self.outdir
        for headerRecord in self.bookHeaderRecords:
            name = headerRecord
            if name != 'dkey':
                ext = u".dat"
                if name == 'img': ext = u".jpg"
                if name == 'color' : ext = u".jpg"
                print u"Processing Section: {0}\n. . .".format(name),
                for index in range (0,len(self.bookHeaderRecords[name])) :
                    fname = u"{0}{1:04d}{2}".format(name,index,ext)
                    destdir = outdir
                    if name == 'img':
                        destdir = os.path.join(outdir,u"img")
                    if name == 'color':
                        destdir = os.path.join(outdir,u"color_img")
                    if name == 'page':
                        destdir = os.path.join(outdir,u"page")
                    if name == 'glyphs':
                        destdir = os.path.join(outdir,u"glyphs")
                    outputFile = os.path.join(destdir,fname)
                    print u".",
                    record = self.getBookPayloadRecord(name,index)
                    if record != '':
                        file(outputFile, 'wb').write(record)
                print u" "
    def getFile(self, zipname):
        htmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
        htmlzip.write(os.path.join(self.outdir,u"book.html"),u"book.html")
        htmlzip.write(os.path.join(self.outdir,u"book.opf"),u"book.opf")
        if os.path.isfile(os.path.join(self.outdir,u"cover.jpg")):
            htmlzip.write(os.path.join(self.outdir,u"cover.jpg"),u"cover.jpg")
        htmlzip.write(os.path.join(self.outdir,u"style.css"),u"style.css")
        zipUpDir(htmlzip, self.outdir, u"img")
        htmlzip.close()

    def getBookType(self):
        return u"Topaz"

    def getBookExtension(self):
        return u".htmlz"

    def getSVGZip(self, zipname):
        svgzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
        svgzip.write(os.path.join(self.outdir,u"index_svg.xhtml"),u"index_svg.xhtml")
        zipUpDir(svgzip, self.outdir, u"svg")
        zipUpDir(svgzip, self.outdir, u"img")
        svgzip.close()

    def cleanup(self):
        if os.path.isdir(self.outdir):
            shutil.rmtree(self.outdir, True)


def usage(progname):
    print u"Removes DRM protection from Topaz ebooks and extracts the contents"
    print u"Usage:"
    print u"  {0} [-k <kindle.k4i>] [-p <comma separated PIDs>] [-s <comma separated Kindle serial numbers>] <infile> <outdir>".format(progname)
# Main
def cli_main():
    argv=unicode_argv()
    progname = os.path.basename(argv[0])
    print u"TopazExtract v{0}.".format(__version__)

    try:
        opts, args = getopt.getopt(argv[1:], "k:p:s:x")
    except getopt.GetoptError, err:
        print u"Error in options or arguments: {0}".format(err.args[0])
        usage(progname)
        return 1
    if len(args)<2:
        usage(progname)
        return 1

    infile = args[0]
    outdir = args[1]
    if not os.path.isfile(infile):
        print u"Input File {0} Does Not Exist.".format(infile)
        return 1

    if not os.path.exists(outdir):
        print u"Output Directory {0} Does Not Exist.".format(outdir)
        return 1

    kDatabaseFiles = []
    serials = []
    pids = []

    for o, a in opts:
        if o == '-k':
            if a == None :
                raise DrmException("Invalid parameter for -k")
            kDatabaseFiles.append(a)
        if o == '-p':
            if a == None :
                raise DrmException("Invalid parameter for -p")
            pids = a.split(',')
        if o == '-s':
            if a == None :
                raise DrmException("Invalid parameter for -s")
            serials = [serial.replace(" ","") for serial in a.split(',')]

    bookname = os.path.splitext(os.path.basename(infile))[0]

    tb = TopazBook(infile)
    title = tb.getBookTitle()
    print u"Processing Book: {0}".format(title)
    md1, md2 = tb.getPIDMetaInfo()
    pids.extend(kgenpids.getPidList(md1, md2, serials, kDatabaseFiles))

    try:
        print u"Decrypting Book"
        tb.processBook(pids)

        print u" Creating HTML ZIP Archive"
        zipname = os.path.join(outdir, bookname + u"_nodrm.htmlz")
        tb.getFile(zipname)

        print u" Creating SVG ZIP Archive"
        zipname = os.path.join(outdir, bookname + u"_SVG.zip")
        tb.getSVGZip(zipname)

        # removing internal temporary directory of pieces
        tb.cleanup()

    except DrmException, e:
        print u"Decryption failed\n{0}".format(traceback.format_exc())
        try:
            tb.cleanup()
        except:
            pass
        return 1

    except Exception, e:
        print u"Decryption failed\n{0}".format(traceback.format_exc())
        try:
            tb.cleanup()
        except:
            pass
        return 1

    return 0


if __name__ == '__main__':
    sys.stdout=SafeUnbuffered(sys.stdout)
    sys.stderr=SafeUnbuffered(sys.stderr)
    sys.exit(cli_main())
@@ -0,0 +1,60 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import with_statement

__license__ = 'GPL v3'

# Standard Python modules.
import os, sys, re, hashlib, traceback
from calibre_plugins.dedrm.__init__ import PLUGIN_NAME, PLUGIN_VERSION

def WineGetKeys(scriptpath, extension, wineprefix=""):
    import subprocess
    from subprocess import Popen, PIPE, STDOUT

    import subasyncio
    from subasyncio import Process

    if extension == u".k4i":
        import json

    basepath, script = os.path.split(scriptpath)
    print u"{0} v{1}: Running {2} under Wine".format(PLUGIN_NAME, PLUGIN_VERSION, script)

    outdirpath = os.path.join(basepath, u"winekeysdir")
    if not os.path.exists(outdirpath):
        os.makedirs(outdirpath)

    if wineprefix != "" and os.path.exists(wineprefix):
        cmdline = u"WINEPREFIX=\"{2}\" wine python.exe \"{0}\" \"{1}\"".format(scriptpath,outdirpath,wineprefix)
    else:
        cmdline = u"wine python.exe \"{0}\" \"{1}\"".format(scriptpath,outdirpath)
    print u"{0} v{1}: Command line: “{2}”".format(PLUGIN_NAME, PLUGIN_VERSION, cmdline)

    try:
        cmdline = cmdline.encode(sys.getfilesystemencoding())
        p2 = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=sys.stdout, stderr=STDOUT, close_fds=False)
        result = p2.wait("wait")
    except Exception, e:
        print u"{0} v{1}: Wine subprocess call error: {2}".format(PLUGIN_NAME, PLUGIN_VERSION, e.args[0])
        return []

    winekeys = []
    # get any files with the requested extension in the output dir
    files = [f for f in os.listdir(outdirpath) if f.endswith(extension)]
    for filename in files:
        try:
            fpath = os.path.join(outdirpath, filename)
            with open(fpath, 'rb') as keyfile:
                if extension == u".k4i":
                    new_key_value = json.loads(keyfile.read())
                else:
                    new_key_value = keyfile.read()
            winekeys.append(new_key_value)
        except:
            print u"{0} v{1}: Error loading file {2}".format(PLUGIN_NAME, PLUGIN_VERSION, filename)
            traceback.print_exc()
        os.remove(fpath)
    print u"{0} v{1}: Found and decrypted {2} {3}".format(PLUGIN_NAME, PLUGIN_VERSION, len(winekeys), u"key file" if len(winekeys) == 1 else u"key files")
    return winekeys
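# A hypothetical call, e.g. to harvest Adobe key files out of a dedicated
# Wine prefix (both paths are made up for illustration):
#
#   keys = WineGetKeys(u"/path/to/adobekey.py", u".der", u"/home/user/.wine-adobe")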
File diff suppressed because it is too large
@@ -1,60 +1,188 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# zipfix.py, version 1.1
# Copyright © 2010-2013 by some_updates, DiapDealer and Apprentice Alf

# Released under the terms of the GNU General Public Licence, version 3
# <http://www.gnu.org/licenses/>

# Revision history:
#   1.0 - Initial release
#   1.1 - Updated to handle zip file metadata correctly

"""
Re-write zip (or ePub) fixing problems with file names (and mimetype entry).
"""

__license__ = 'GPL v3'
__version__ = "1.1"

import sys
import zlib
import zipfilerugged
import os
import os.path
import getopt
from struct import unpack


_FILENAME_LEN_OFFSET = 26
_EXTRA_LEN_OFFSET = 28
_FILENAME_OFFSET = 30
_MAX_SIZE = 64 * 1024
_MIMETYPE = 'application/epub+zip'

class ZipInfo(zipfilerugged.ZipInfo):
    def __init__(self, *args, **kwargs):
        # pull our keyword argument out before delegating, keeping the
        # parent's default when it isn't supplied
        compress_type = kwargs.pop('compress_type', None)
        super(ZipInfo, self).__init__(*args, **kwargs)
        if compress_type is not None:
            self.compress_type = compress_type

class fixZip:
    def __init__(self, zinput, zoutput):
        self.ztype = 'zip'
        if zinput.lower().find('.epub') >= 0 :
            self.ztype = 'epub'
        self.inzip = zipfilerugged.ZipFile(zinput,'r')
        self.outzip = zipfilerugged.ZipFile(zoutput,'w')
        # open the input zip for reading only as a raw file
        self.bzf = file(zinput,'rb')

    def getlocalname(self, zi):
        local_header_offset = zi.header_offset
        self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET)
        leninfo = self.bzf.read(2)
        local_name_length, = unpack('<H', leninfo)
        self.bzf.seek(local_header_offset + _FILENAME_OFFSET)
        local_name = self.bzf.read(local_name_length)
        return local_name

    def uncompress(self, cmpdata):
        dc = zlib.decompressobj(-15)
        data = ''
        while len(cmpdata) > 0:
            if len(cmpdata) > _MAX_SIZE :
                newdata = cmpdata[0:_MAX_SIZE]
                cmpdata = cmpdata[_MAX_SIZE:]
            else:
                newdata = cmpdata
                cmpdata = ''
            newdata = dc.decompress(newdata)
            unprocessed = dc.unconsumed_tail
            if len(unprocessed) == 0:
                newdata += dc.flush()
            data += newdata
            cmpdata += unprocessed
            unprocessed = ''
        return data
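    # A minimal sketch of the raw-deflate round trip uncompress() performs:
    # zip members are headerless deflate streams, so both directions use a
    # negative window-bits value (-15) to suppress the zlib header:
    #
    #   co = zlib.compressobj(9, zlib.DEFLATED, -15)
    #   blob = co.compress(data) + co.flush()
    #   dc = zlib.decompressobj(-15)
    #   assert dc.decompress(blob) + dc.flush() == data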
|
|
||||||
|
def getfiledata(self, zi):
|
||||||
|
# get file name length and exta data length to find start of file data
|
||||||
|
local_header_offset = zi.header_offset
|
||||||
|
|
||||||
|
self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET)
|
||||||
|
leninfo = self.bzf.read(2)
|
||||||
|
local_name_length, = unpack('<H', leninfo)
|
||||||
|
|
||||||
|
self.bzf.seek(local_header_offset + _EXTRA_LEN_OFFSET)
|
||||||
|
exinfo = self.bzf.read(2)
|
||||||
|
extra_field_length, = unpack('<H', exinfo)
|
||||||
|
|
||||||
|
self.bzf.seek(local_header_offset + _FILENAME_OFFSET + local_name_length + extra_field_length)
|
||||||
|
data = None
|
||||||
|
|
||||||
|
# if not compressed we are good to go
|
||||||
|
if zi.compress_type == zipfilerugged.ZIP_STORED:
|
||||||
|
data = self.bzf.read(zi.file_size)
|
||||||
|
|
||||||
|
# if compressed we must decompress it using zlib
|
||||||
|
if zi.compress_type == zipfilerugged.ZIP_DEFLATED:
|
||||||
|
cmpdata = self.bzf.read(zi.compress_size)
|
||||||
|
data = self.uncompress(cmpdata)
|
||||||
|
|
||||||
|
return data
|
||||||
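    # Local file header layout assumed by the offsets used above:
    #
    #   offset  0   'PK\x03\x04' signature
    #   offset 26   file name length (2 bytes, little-endian)
    #   offset 28   extra field length (2 bytes, little-endian)
    #   offset 30   file name, then extra field, then the file data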
|
|
||||||
|
|
||||||
|
|
||||||
|
def fix(self):
|
||||||
|
# get the zipinfo for each member of the input archive
|
||||||
|
# and copy member over to output archive
|
||||||
|
# if problems exist with local vs central filename, fix them
|
||||||
|
|
||||||
|
# if epub write mimetype file first, with no compression
|
||||||
|
if self.ztype == 'epub':
|
||||||
|
# first get a ZipInfo with current time and no compression
|
||||||
|
mimeinfo = ZipInfo('mimetype',compress_type=zipfilerugged.ZIP_STORED)
|
||||||
|
mimeinfo.internal_attr = 1 # text file
|
||||||
|
try:
|
||||||
|
# if the mimetype is present, get its info, including time-stamp
|
||||||
|
oldmimeinfo = self.inzip.getinfo('mimetype')
|
||||||
|
# copy across useful fields
|
||||||
|
mimeinfo.date_time = oldmimeinfo.date_time
|
||||||
|
mimeinfo.comment = oldmimeinfo.comment
|
||||||
|
mimeinfo.extra = oldmimeinfo.extra
|
||||||
|
mimeinfo.internal_attr = oldmimeinfo.internal_attr
|
||||||
|
mimeinfo.external_attr = oldmimeinfo.external_attr
|
||||||
|
mimeinfo.create_system = oldmimeinfo.create_system
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
self.outzip.writestr(mimeinfo, _MIMETYPE)
|
||||||
|
|
||||||
|
# write the rest of the files
|
||||||
|
for zinfo in self.inzip.infolist():
|
||||||
|
if zinfo.filename != "mimetype" or self.ztype != 'epub':
|
||||||
|
data = None
|
||||||
|
try:
|
||||||
|
data = self.inzip.read(zinfo.filename)
|
||||||
|
except zipfilerugged.BadZipfile or zipfilerugged.error:
|
||||||
|
local_name = self.getlocalname(zinfo)
|
||||||
|
data = self.getfiledata(zinfo)
|
||||||
|
zinfo.filename = local_name
|
||||||
|
|
||||||
|
# create new ZipInfo with only the useful attributes from the old info
|
||||||
|
nzinfo = ZipInfo(zinfo.filename, zinfo.date_time, compress_type=zinfo.compress_type)
|
||||||
|
nzinfo.comment=zinfo.comment
|
||||||
|
nzinfo.extra=zinfo.extra
|
||||||
|
nzinfo.internal_attr=zinfo.internal_attr
|
||||||
|
nzinfo.external_attr=zinfo.external_attr
|
||||||
|
nzinfo.create_system=zinfo.create_system
|
||||||
|
self.outzip.writestr(nzinfo,data)
|
||||||
|
|
||||||
|
self.bzf.close()
|
||||||
|
self.inzip.close()
|
||||||
|
self.outzip.close()
|
||||||
|
|
||||||
|
|
||||||
|
def usage():
|
||||||
|
print """usage: zipfix.py inputzip outputzip
|
||||||
|
inputzip is the source zipfile to fix
|
||||||
|
outputzip is the fixed zip archive
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def repairBook(infile, outfile):
|
||||||
|
if not os.path.exists(infile):
|
||||||
|
print "Error: Input Zip File does not exist"
|
||||||
|
return 1
|
||||||
try:
|
try:
|
||||||
cmdline = cmdline.encode(sys.getfilesystemencoding())
|
fr = fixZip(infile, outfile)
|
||||||
p2 = Process(cmdline, shell=True, bufsize=1, stdin=None, stdout=sys.stdout, stderr=STDOUT, close_fds=False)
|
fr.fix()
|
||||||
result = p2.wait("wait")
|
return 0
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print u"{0} v{1}: Wine subprocess call error: {2}".format(PLUGIN_NAME, PLUGIN_VERSION, e.args[0])
|
print "Error Occurred ", e
|
||||||
return []
|
return 2
|
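# Typical use, mirroring main() below (file names are hypothetical):
#
#   rv = repairBook('damaged.epub', 'repaired.epub')   # 0 on success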
def main(argv=sys.argv):
    if len(argv)!=3:
        usage()
        return 1
    infile = argv[1]
    outfile = argv[2]
    return repairBook(infile, outfile)


if __name__ == '__main__' :
    sys.exit(main())
@@ -19,6 +19,7 @@ from __future__ import with_statement
 # 1.6 - Fixed a problem getting the disk serial numbers
 # 1.7 - Work if TkInter is missing
 # 1.8 - Fixes for Kindle for Mac, and non-ascii in Windows user names
+# 1.9 - Fixes for Unicode in Windows user names

 """
@@ -26,7 +27,7 @@ Retrieve Kindle for PC/Mac user key.
 """

 __license__ = 'GPL v3'
-__version__ = '1.8'
+__version__ = '1.9'

 import sys, os, re
 from struct import pack, unpack, unpack_from
@@ -907,18 +908,34 @@ if iswindows:
             return CryptUnprotectData
     CryptUnprotectData = CryptUnprotectData()

+    # Return an environment variable's value as unicode
+    def getEnvironmentVariable(name):
+        import ctypes
+        name = unicode(name) # make sure string argument is unicode
+        n = ctypes.windll.kernel32.GetEnvironmentVariableW(name, None, 0)
+        if n == 0:
+            return None
+        buf = ctypes.create_unicode_buffer(u'\0'*n)
+        ctypes.windll.kernel32.GetEnvironmentVariableW(name, buf, n)
+        return buf.value
+
     # Locate all of the kindle-info style files and return as list
     def getKindleInfoFiles():
         kInfoFiles = []
         # some 64 bit machines do not have the proper registry key for some reason
-        # or the pythonn interface to the 32 vs 64 bit registry is broken
+        # or the python interface to the 32 vs 64 bit registry is broken
         path = ""
         if 'LOCALAPPDATA' in os.environ.keys():
-            path = os.environ['LOCALAPPDATA']
+            # Python 2.x os.environ does not return unicode values, so expand
+            # the variable via the registry API instead
+            path = winreg.ExpandEnvironmentStrings(u"%LOCALAPPDATA%")
+            # this is just another alternative.
+            # path = getEnvironmentVariable('LOCALAPPDATA')
+            if not os.path.isdir(path):
+                path = ""
         else:
             # User Shell Folders should take precedence over Shell Folders if present
             try:
+                # this will still break
                 regkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\User Shell Folders\\")
                 path = winreg.QueryValueEx(regkey, 'Local AppData')[0]
                 if not os.path.isdir(path):
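# Hypothetical use of the getEnvironmentVariable() helper added above, on a
# Windows machine (a sketch; it returns None when the variable is unset):
#
#   path = getEnvironmentVariable('LOCALAPPDATA')    # unicode path or None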
@@ -937,13 +954,14 @@ if iswindows:
         if path == "":
             print ('Could not find the folder in which to look for kinfoFiles.')
         else:
-            print('searching for kinfoFiles in ' + path)
+            # Probably not the best. To Fix (shouldn't ignore in encoding) or use utf-8
+            print(u'searching for kinfoFiles in ' + path.encode('ascii', 'ignore'))

         # look for (K4PC 1.9.0 and later) .kinf2011 file
         kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
         if os.path.isfile(kinfopath):
             found = True
-            print('Found K4PC 1.9+ kinf2011 file: ' + kinfopath)
+            print('Found K4PC 1.9+ kinf2011 file: ' + kinfopath.encode('ascii','ignore'))
             kInfoFiles.append(kinfopath)

         # look for (K4PC 1.6.0 and later) rainier.2.1.1.kinf file
@@ -1142,7 +1160,7 @@ if iswindows:
             cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
             DB[keyname] = cleartext

-        if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB:
+        if 'kindle.account.tokens' in DB:
             print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(GetIDString(), GetUserName().decode("latin-1"))
             # store values used in decryption
             DB['IDString'] = GetIDString()
@@ -1758,7 +1776,7 @@ elif isosx:
                 break
             except:
                 pass
-        if 'MazamaRandomNumber' in DB and 'kindle.account.tokens' in DB:
+        if 'kindle.account.tokens' in DB:
             # store values used in decryption
             print u"Decrypted key file using IDString '{0:s}' and UserName '{1:s}'".format(IDString, GetUserName())
             DB['IDString'] = IDString