trogdor: update python scripts for python3
Change-Id: I46525243729c1dbcd30b346d4603452eea14ad9d
Signed-off-by: T Michael Turney <mturney@codeaurora.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/38558
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
parent b1fa25fab7
commit 540b8ecc1e
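Every hunk below applies the same handful of mechanical Python 2 to Python 3 conversions; the scripts keep their logic, and the most visible structural changes are the e_ident handling in getPackedData() and the unrolled per-field checks in verify_elf_header() in mbn_tools.py. A minimal, illustrative sketch of the recurring idioms (my own example, not code from the patch; the ELF64_PHDR_SIZE value is assumed here):

    #!/usr/bin/env python3
    # Illustrative only: the recurring py2 -> py3 idioms this patch applies.
    ELF64_PHDR_SIZE = 56  # assumed value; mbn_tools.py defines its own constant

    class Phdr:
        def __init__(self):
            self.p_type = 1
            self.p_offset = 0

        def printValues(self):
            # py2: for attr, value in self.__dict__.iteritems(): print attr, value
            for attr, value in self.__dict__.items():
                print(attr, value)

    def check_size(size):
        # py2: raise RuntimeError, "too large: " + hex(size)
        if size > 0xFFFFFFFF:
            raise RuntimeError("too large: " + hex(size))

    # py2 built zero templates with '\0' * n; struct-packed headers now need bytes
    empty_phdr = b'\0' * ELF64_PHDR_SIZE

    Phdr().printValues()
    check_size(len(empty_phdr))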
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #============================================================================
 #
 #/** @file createxbl.py

@@ -238,9 +238,9 @@ def main():
 target_nonsec = target_base + "_combined_hash.mbn"


-#print "Input file 1:", elf_inp_file1
+#print("Input file 1:", elf_inp_file1)
-#print "Input file 2:", elf_inp_file2
+#print("Input file 2:", elf_inp_file2)
-#print "Output file:", binary_out
+#print("Output file:", binary_out)

 merge_elfs([],
 elf_inp_file1,

@@ -270,7 +270,7 @@ def main():
 secure_type = image_header_secflag,
 header_version = header_version )
 if rv:
-raise RuntimeError, "Failed to run pboot_gen_elf"
+raise RuntimeError("Failed to run pboot_gen_elf")

 # Create hash table header
 rv = mbn_tools.image_header([],

@@ -281,7 +281,7 @@ def main():
 elf_file_name = source_elf,
 header_version = header_version)
 if rv:
-raise RuntimeError, "Failed to create image header for hash segment"
+raise RuntimeError("Failed to create image header for hash segment")

 files_to_cat_in_order = [target_hash_hd, target_hash]
 mbn_tools.concat_files (target_nonsec, files_to_cat_in_order)

@@ -369,7 +369,7 @@ def merge_elfs(env,

 # Create a new ELF header for the output file
 if is_out_elf_64_bit:
-out_elf_header = mbn_tools.Elf64_Ehdr('\0' * ELF64_HDR_SIZE)
+out_elf_header = mbn_tools.Elf64_Ehdr(b'\0' * ELF64_HDR_SIZE)
 out_elf_header.e_phoff = ELF64_HDR_SIZE
 out_elf_header.e_ehsize = ELF64_HDR_SIZE
 out_elf_header.e_phentsize = ELF64_PHDR_SIZE

@@ -384,7 +384,7 @@ def merge_elfs(env,

 out_elf_header.e_entry = elf_header1.e_entry
 else:
-out_elf_header = mbn_tools.Elf32_Ehdr('\0' * ELF32_HDR_SIZE)
+out_elf_header = mbn_tools.Elf32_Ehdr(b'\0' * ELF32_HDR_SIZE)
 out_elf_header.e_phoff = ELF32_HDR_SIZE
 out_elf_header.e_ehsize = ELF32_HDR_SIZE
 out_elf_header.e_phentsize = ELF32_PHDR_SIZE

@@ -401,7 +401,7 @@ def merge_elfs(env,
 # Address needs to be verified that it is not greater than 32 bits
 # as it is possible to go from a 64 bit elf to 32.
 if (elf_header1.e_entry > 0xFFFFFFFF):
-print "ERROR: File 1's entry point is too large to convert."
+print("ERROR: File 1's entry point is too large to convert.")
 exit()
 out_elf_header.e_entry = elf_header1.e_entry

@@ -457,7 +457,7 @@ def merge_elfs(env,
 # Copy program header piece by piece to ensure possible conversion success
 if is_out_elf_64_bit == True:
 # Converting from 32 to 64 elf requires no data size validation
-new_phdr = mbn_tools.Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
+new_phdr = mbn_tools.Elf64_Phdr(b'\0' * ELF64_PHDR_SIZE)
 new_phdr.p_type = curr_phdr.p_type
 new_phdr.p_offset = segment_offset
 new_phdr.p_vaddr = curr_phdr.p_vaddr

@@ -470,7 +470,7 @@ def merge_elfs(env,
 # Converting from 64 to 32 elf requires data size validation
 # Note that there is an option to discard a segment if it is only ZI
 # and its address is greater than 32 bits
-new_phdr = mbn_tools.Elf32_Phdr('\0' * ELF32_PHDR_SIZE)
+new_phdr = mbn_tools.Elf32_Phdr(b'\0' * ELF32_PHDR_SIZE)
 new_phdr.p_type = curr_phdr.p_type
 new_phdr.p_offset = segment_offset

@@ -478,7 +478,7 @@ def merge_elfs(env,
 if (zi_oob_enabled == True) and (curr_phdr.p_filesz == 0):
 continue
 else:
-print "ERROR: File 1 VAddr is too large for conversion."
+print("ERROR: File 1 VAddr is too large for conversion.")
 exit()
 new_phdr.p_vaddr = curr_phdr.p_vaddr

@@ -486,33 +486,33 @@ def merge_elfs(env,
 if (zi_oob_enabled == True) and (curr_phdr.p_filesz == 0):
 continue
 else:
-print "ERROR: File 1 PAddr is too large for conversion."
+print("ERROR: File 1 PAddr is too large for conversion.")
 exit()
 new_phdr.p_paddr = curr_phdr.p_paddr

 if curr_phdr.p_filesz > 0xFFFFFFFF:
-print "ERROR: File 1 Filesz is too large for conversion."
+print("ERROR: File 1 Filesz is too large for conversion.")
 exit()
 new_phdr.p_filesz = curr_phdr.p_filesz

 if curr_phdr.p_memsz > 0xFFFFFFFF:
-print "ERROR: File 1 Memsz is too large for conversion."
+print("ERROR: File 1 Memsz is too large for conversion.")
 exit()
 new_phdr.p_memsz = curr_phdr.p_memsz

 if curr_phdr.p_flags > 0xFFFFFFFF:
-print "ERROR: File 1 Flags is too large for conversion."
+print("ERROR: File 1 Flags is too large for conversion.")
 exit()
 new_phdr.p_flags = curr_phdr.p_flags

 if curr_phdr.p_align > 0xFFFFFFFF:
-print "ERROR: File 1 Align is too large for conversion."
+print("ERROR: File 1 Align is too large for conversion.")
 exit()
 new_phdr.p_align = curr_phdr.p_align


-#print "i=",i
+#print("i=",i)
-#print "phdr_offset=", phdr_offset
+#print("phdr_offset=", phdr_offset)

 # update output file location to next phdr location
 elf_out_fp.seek(phdr_offset)

@@ -521,14 +521,14 @@ def merge_elfs(env,

 inp_data_offset = curr_phdr.p_offset # used to read data from input file

-# print "inp_data_offset="
+# print("inp_data_offset=")
-# print inp_data_offset
+# print(inp_data_offset)
 #
-# print "curr_phdr.p_offset="
+# print("curr_phdr.p_offset=")
-# print curr_phdr.p_offset
+# print(curr_phdr.p_offset)
 #
-# print "curr_phdr.p_filesz="
+# print("curr_phdr.p_filesz=")
-# print curr_phdr.p_filesz
+# print(curr_phdr.p_filesz)

 # output current phdr
 if is_out_elf_64_bit == False:

@@ -555,7 +555,7 @@ def merge_elfs(env,
 # Copy program header piece by piece to ensure possible conversion success
 if is_out_elf_64_bit == True:
 # Converting from 32 to 64 elf requires no data size validation
-new_phdr = mbn_tools.Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
+new_phdr = mbn_tools.Elf64_Phdr(b'\0' * ELF64_PHDR_SIZE)
 new_phdr.p_type = curr_phdr.p_type
 new_phdr.p_offset = segment_offset
 new_phdr.p_vaddr = curr_phdr.p_vaddr

@@ -568,7 +568,7 @@ def merge_elfs(env,
 # Converting from 64 to 32 elf requires data size validation
 # Note that there is an option to discard a segment if it is only ZI
 # and its address is greater than 32 bits
-new_phdr = mbn_tools.Elf32_Phdr('\0' * ELF32_PHDR_SIZE)
+new_phdr = mbn_tools.Elf32_Phdr(b'\0' * ELF32_PHDR_SIZE)
 new_phdr.p_type = curr_phdr.p_type
 new_phdr.p_offset = segment_offset

@@ -576,7 +576,7 @@ def merge_elfs(env,
 if (zi_oob_enabled == True) and (curr_phdr.p_filesz == 0):
 continue
 else:
-print "ERROR: File 2 VAddr is too large for conversion."
+print("ERROR: File 2 VAddr is too large for conversion.")
 exit()
 new_phdr.p_vaddr = curr_phdr.p_vaddr

@@ -584,33 +584,33 @@ def merge_elfs(env,
 if (zi_oob_enabled == True) and (curr_phdr.p_filesz == 0):
 continue
 else:
-print "ERROR: File 2 PAddr is too large for conversion."
+print("ERROR: File 2 PAddr is too large for conversion.")
 exit()
 new_phdr.p_paddr = curr_phdr.p_paddr

 if curr_phdr.p_filesz > 0xFFFFFFFF:
-print "ERROR: File 2 Filesz is too large for conversion."
+print("ERROR: File 2 Filesz is too large for conversion.")
 exit()
 new_phdr.p_filesz = curr_phdr.p_filesz

 if curr_phdr.p_memsz > 0xFFFFFFFF:
-print "ERROR: File 2 Memsz is too large for conversion."
+print("ERROR: File 2 Memsz is too large for conversion.")
 exit()
 new_phdr.p_memsz = curr_phdr.p_memsz

 if curr_phdr.p_flags > 0xFFFFFFFF:
-print "ERROR: File 2 Flags is too large for conversion."
+print("ERROR: File 2 Flags is too large for conversion.")
 exit()
 new_phdr.p_flags = curr_phdr.p_flags

 if curr_phdr.p_align > 0xFFFFFFFF:
-print "ERROR: File 2 Align is too large for conversion."
+print("ERROR: File 2 Align is too large for conversion.")
 exit()
 new_phdr.p_align = curr_phdr.p_align


-# print "i=",i
+# print("i=",i)
-# print "phdr_offset=", phdr_offset
+# print("phdr_offset=", phdr_offset)

 # update output file location to next phdr location
 elf_out_fp.seek(phdr_offset)

@@ -619,14 +619,14 @@ def merge_elfs(env,

 inp_data_offset = curr_phdr.p_offset # used to read data from input file

-# print "inp_data_offset="
+# print("inp_data_offset=")
-# print inp_data_offset
+# print(inp_data_offset)
 #
-# print "curr_phdr.p_offset="
+# print("curr_phdr.p_offset=")
-# print curr_phdr.p_offset
+# print(curr_phdr.p_offset)
 #
-# print "curr_phdr.p_filesz="
+# print("curr_phdr.p_filesz=")
-# print curr_phdr.p_filesz
+# print(curr_phdr.p_filesz)

 # output current phdr
 if is_out_elf_64_bit == False:

@@ -658,14 +658,14 @@ def merge_elfs(env,
 entry_seg_offset = phdr.p_offset
 break
 if entry_seg_offset == -1:
-print "Error: Failed to find entry point in any segment!"
+print("Error: Failed to find entry point in any segment!")
 exit()
 # magical equation for program header's phys and virt addr
 phys_virt_addr = entry_addr - entry_seg_offset

 if is_out_elf_64_bit:
 # Converting from 32 to 64 elf requires no data size validation
-new_phdr = mbn_tools.Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
+new_phdr = mbn_tools.Elf64_Phdr(b'\0' * ELF64_PHDR_SIZE)
 new_phdr.p_type = 0x1
 new_phdr.p_offset = segment_offset
 new_phdr.p_vaddr = phys_virt_addr

@@ -683,7 +683,7 @@ def merge_elfs(env,
 # Converting from 64 to 32 elf requires data size validation
 # Don't discard the segment containing xbl_sec, simply error out
 # if the address is greater than 32 bits
-new_phdr = mbn_tools.Elf32_Phdr('\0' * ELF32_PHDR_SIZE)
+new_phdr = mbn_tools.Elf32_Phdr(b'\0' * ELF32_PHDR_SIZE)
 new_phdr.p_type = 0x1 #
 new_phdr.p_offset = segment_offset
 if header_version >= 5:

@@ -696,13 +696,13 @@ def merge_elfs(env,

 if phys_virt_addr > 0xFFFFFFFF:
 if zi_oob_enabled == False or curr_phdr.p_filesz != 0:
-print "ERROR: File xbl_sec VAddr or PAddr is too big for conversion."
+print("ERROR: File xbl_sec VAddr or PAddr is too big for conversion.")
 exit()
 new_phdr.p_vaddr = phys_virt_addr
 new_phdr.p_paddr = phys_virt_addr

 if os.path.getsize(elf_in_file_xbl_sec) > 0xFFFFFFFF:
-print "ERROR: File xbl_sec Filesz is too big for conversion."
+print("ERROR: File xbl_sec Filesz is too big for conversion.")
 exit()
 new_phdr.p_filesz = os.path.getsize(elf_in_file_xbl_sec)
 new_phdr.p_memsz = new_phdr.p_filesz
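The repeated '\0' * SIZE to b'\0' * SIZE changes above are needed because those zero-filled templates end up in struct.unpack() inside the mbn_tools header classes, and Python 3's struct only accepts bytes-like buffers. A small standalone demonstration of the failure mode (standard library only, not the coreboot helpers; '8I' matches the eight unsigned 32-bit fields of a 32-bit program header):

    import struct

    ELF32_PHDR_SIZE = 32  # eight unsigned 32-bit words

    # Python 3: a bytes template unpacks fine ...
    fields = struct.unpack('8I', b'\0' * ELF32_PHDR_SIZE)
    print(fields)

    # ... while the old str template is rejected with a TypeError.
    try:
        struct.unpack('8I', '\0' * ELF32_PHDR_SIZE)
    except TypeError as err:
        print('str buffer rejected:', err)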
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # Copyright (c) 2013 The Linux Foundation. All rights reserved.
 #

@@ -97,7 +97,7 @@ def usage(msg=None):
 if msg != None:
 sys.stderr.write("%s: %s\n" % (PROG_NAME, msg))

-print "Usage: %s <base-addr> <input-file> <output-file>" % PROG_NAME
+print("Usage: %s <base-addr> <input-file> <output-file>" % PROG_NAME)

 if msg != None:
 exit(1)

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #===============================================================================
 #
 # MBN TOOLS

@@ -100,24 +100,24 @@ ELFINFO_MAG0_INDEX = 0
 ELFINFO_MAG1_INDEX = 1
 ELFINFO_MAG2_INDEX = 2
 ELFINFO_MAG3_INDEX = 3
-ELFINFO_MAG0 = '\x7f'
+ELFINFO_MAG0 = 127 # 0x7F
-ELFINFO_MAG1 = 'E'
+ELFINFO_MAG1 = 69 # E
-ELFINFO_MAG2 = 'L'
+ELFINFO_MAG2 = 76 # L
-ELFINFO_MAG3 = 'F'
+ELFINFO_MAG3 = 70 # F
 ELFINFO_CLASS_INDEX = 4
-ELFINFO_CLASS_32 = '\x01'
+ELFINFO_CLASS_32 = 1
-ELFINFO_CLASS_64 = '\x02'
+ELFINFO_CLASS_64 = 2
 ELFINFO_VERSION_INDEX = 6
-ELFINFO_VERSION_CURRENT = '\x01'
+ELFINFO_VERSION_CURRENT = 1
 ELF_BLOCK_ALIGN = 0x1000
 ALIGNVALUE_1MB = 0x100000
 ALIGNVALUE_4MB = 0x400000
-ELFINFO_DATA2LSB = '\x01'
+ELFINFO_DATA2LSB = b'\x01'
-ELFINFO_EXEC_ETYPE = '\x02\x00'
+ELFINFO_EXEC_ETYPE = b'\x02\x00'
-ELFINFO_ARM_MACHINETYPE = '\x28\x00'
+ELFINFO_ARM_MACHINETYPE = b'\x28\x00'
-ELFINFO_VERSION_EV_CURRENT = '\x01\x00\x00\x00'
+ELFINFO_VERSION_EV_CURRENT = b'\x01\x00\x00\x00'
 ELFINFO_SHOFF = 0x00
-ELFINFO_PHNUM = '\x01\x00'
+ELFINFO_PHNUM = b'\x01\x00'
 ELFINFO_RESERVED = 0x00

 # ELF Program Header Types

@@ -330,9 +330,9 @@ class Elf_Ehdr_common:
 self.e_version = unpacked_data[3]

 def printValues(self):
-print "ATTRIBUTE / VALUE"
+print("ATTRIBUTE / VALUE")
-for attr, value in self.__dict__.iteritems():
+for attr, value in self.__dict__.items():
-print attr, value
+print(attr, value)


@@ -362,12 +362,16 @@ class Elf32_Ehdr:
 self.e_shstrndx = unpacked_data[13]

 def printValues(self):
-print "ATTRIBUTE / VALUE"
+print("ATTRIBUTE / VALUE")
-for attr, value in self.__dict__.iteritems():
+for attr, value in self.__dict__.items():
-print attr, value
+print(attr, value)

 def getPackedData(self):
-values = [self.e_ident,
+if type(self.e_ident) == str:
+packvalue = bytes(self.e_ident, 'utf-8')
+else:
+packvalue = self.e_ident
+values = [packvalue,
 self.e_type,
 self.e_machine,
 self.e_version,

@@ -406,9 +410,9 @@ class Elf32_Phdr:
 self.p_align = unpacked_data[7]

 def printValues(self):
-print "ATTRIBUTE / VALUE"
+print("ATTRIBUTE / VALUE")
-for attr, value in self.__dict__.iteritems():
+for attr, value in self.__dict__.items():
-print attr, value
+print(attr, value)

 def getPackedData(self):
 values = [self.p_type,

@@ -449,12 +453,16 @@ class Elf64_Ehdr:
 self.e_shstrndx = unpacked_data[13]

 def printValues(self):
-print "ATTRIBUTE / VALUE"
+print("ATTRIBUTE / VALUE")
-for attr, value in self.__dict__.iteritems():
+for attr, value in self.__dict__.items():
-print attr, value
+print(attr, value)

 def getPackedData(self):
-values = [self.e_ident,
+if type(self.e_ident) == str:
+packvalue = bytes(self.e_ident, 'utf-8')
+else:
+packvalue = self.e_ident
+values = [packvalue,
 self.e_type,
 self.e_machine,
 self.e_version,

@@ -493,9 +501,9 @@ class Elf64_Phdr:
 self.p_align = unpacked_data[7]

 def printValues(self):
-print "ATTRIBUTE / VALUE"
+print("ATTRIBUTE / VALUE")
-for attr, value in self.__dict__.iteritems():
+for attr, value in self.__dict__.items():
-print attr, value
+print(attr, value)

 def getPackedData(self):
 values = [self.p_type,

@@ -518,7 +526,7 @@ class SegmentInfo:
 def __init__(self):
 self.flag = 0
 def printValues(self):
-print 'Flag: ' + str(self.flag)
+print('Flag: ' + str(self.flag))

 #----------------------------------------------------------------------------
 # Regular Boot Header Class

@@ -740,7 +748,7 @@ def generate_meta_data(env, meta_out_file_name, add_magic_num = False):
 xml_target_file.close()
 else:
 xml_target_file.close()
-raise RuntimeError, "XML Size too large: " + str(xml_header_size)
+raise RuntimeError("XML Size too large: " + str(xml_header_size))

 #----------------------------------------------------------------------------
 # encrypt_mbn

@@ -831,7 +839,7 @@ def create_elf_header( output_file_name,
 is_elf_64_bit = False):

 if (output_file_name is None):
-raise RuntimeError, "Requires a ELF header file"
+raise RuntimeError("Requires a ELF header file")

 # Create a elf header and program header
 # Write the headers to the output file

@@ -922,13 +930,13 @@ def image_header(env, gen_dict,

 # Preliminary checks
 if (requires_preamble is True) and (preamble_file_name is None):
-raise RuntimeError, "Image Header requires a preamble file"
+raise RuntimeError("Image Header requires a preamble file")

 if (gen_dict['IMAGE_KEY_MBN_TYPE'] == 'elf') and (elf_file_name is None):
-raise RuntimeError, "ELF Image Headers require an elf file"
+raise RuntimeError("ELF Image Headers require an elf file")

 if (in_code_size is None) and (os.path.exists(code_file_name) is False):
-raise RuntimeError, "Code size unavailable, and input file does not exist"
+raise RuntimeError("Code size unavailable, and input file does not exist")

 # Initialize
 if in_code_size is not None:

@@ -1018,7 +1026,7 @@ def image_header(env, gen_dict,
 boot_header.writePackedData(target = output_file_name, write_full_hdr = write_full_hdr)

 else:
-raise RuntimeError, "Header format not supported: " + str(header_format)
+raise RuntimeError("Header format not supported: " + str(header_format))
 return 0


@@ -1066,15 +1074,15 @@ def pboot_gen_elf(env, elf_in_file_name,
 hashtable_shift = 0

 if elf_header.e_ident[ELFINFO_CLASS_INDEX] == ELFINFO_CLASS_64:
-new_phdr = Elf64_Phdr('\0' * ELF64_PHDR_SIZE)
+new_phdr = Elf64_Phdr(b'\0' * ELF64_PHDR_SIZE)
 elf_header_size = ELF64_HDR_SIZE
 is_elf64 = True
 else:
-new_phdr = Elf32_Phdr('\0' * ELF32_PHDR_SIZE)
+new_phdr = Elf32_Phdr(b'\0' * ELF32_PHDR_SIZE)
 elf_header_size = ELF32_HDR_SIZE
 is_elf64 = False

-hash = '\0' * mi_prog_boot_digest_size
+hash = b'\0' * mi_prog_boot_digest_size
 phdr_start = 0
 bytes_to_pad = 0
 hash_seg_end = 0

@@ -1083,7 +1091,7 @@ def pboot_gen_elf(env, elf_in_file_name,
 if elf_out_file_name is not None:
 # Assert limit on number of program headers in input ELF
 if num_phdrs > MAX_PHDR_COUNT:
-raise RuntimeError, "Input ELF has exceeded maximum number of program headers"
+raise RuntimeError("Input ELF has exceeded maximum number of program headers")

 # Create new program header for the ELF Header + Program Headers
 new_phdr.p_type = NULL_TYPE

@@ -1093,11 +1101,11 @@ def pboot_gen_elf(env, elf_in_file_name,
 elf_header.e_phnum += 2

 # Create an empty hash entry for PHDR_TYPE
-hash_out_fp.write('\0' * mi_prog_boot_digest_size)
+hash_out_fp.write(b'\0' * mi_prog_boot_digest_size)
 hashtable_size += mi_prog_boot_digest_size

 # Create an empty hash entry for the hash segment itself
-hash_out_fp.write('\0' * mi_prog_boot_digest_size)
+hash_out_fp.write(b'\0' * mi_prog_boot_digest_size)
 hashtable_size += mi_prog_boot_digest_size

 # Begin hash table generation

@@ -1117,7 +1125,7 @@ def pboot_gen_elf(env, elf_in_file_name,

 # Seg_size should be page aligned
 if (seg_size & (ELF_BLOCK_ALIGN - 1)) > 0:
-raise RuntimeError, "seg_size: " + hex(seg_size) + " is not ELF page aligned!"
+raise RuntimeError("seg_size: " + hex(seg_size) + " is not ELF page aligned!")

 off = seg_offset + seg_size


@@ -1134,7 +1142,7 @@ def pboot_gen_elf(env, elf_in_file_name,
 if MI_PBT_CHECK_FLAG_TYPE(curr_phdr.p_flags) is True:
 hash = generate_hash(fbuf, sha_algo)
 else:
-hash = '\0' * mi_prog_boot_digest_size
+hash = b'\0' * mi_prog_boot_digest_size

 # Write hash to file
 hash_out_fp.write(hash)

@@ -1153,7 +1161,7 @@ def pboot_gen_elf(env, elf_in_file_name,
 if (MI_PBT_CHECK_FLAG_TYPE(curr_phdr.p_flags) is True) and (data_len > 0):
 hash = generate_hash(file_buff, sha_algo)
 else:
-hash = '\0' * mi_prog_boot_digest_size
+hash = b'\0' * mi_prog_boot_digest_size

 # Write hash to file
 hash_out_fp.write(hash)

@@ -1179,9 +1187,9 @@ def pboot_gen_elf(env, elf_in_file_name,
 if (hash_seg_max_size is not None):
 # Error checking for hash segment size validity
 if hashtable_size > hash_seg_max_size:
-raise RuntimeError, "Hash table exceeds maximum hash segment size: " + hex(hash_seg_max_size)
+raise RuntimeError("Hash table exceeds maximum hash segment size: " + hex(hash_seg_max_size))
 if (hash_seg_max_size & (ELF_BLOCK_ALIGN-1)) is not 0:
-raise RuntimeError, "Hash segment size passed is not ELF Block Aligned: " + hex(hash_seg_max_size)
+raise RuntimeError("Hash segment size passed is not ELF Block Aligned: " + hex(hash_seg_max_size))

 # Check if hash physical address parameter was passed
 if last_phys_addr is not None:

@@ -1324,7 +1332,7 @@ def pboot_add_hash(env, elf_in_file_name,
 file_copy_offset(hash_tbl_fp, 0, elf_out_fp, hash_hdr_offset, hash_size)

 else:
-raise RuntimeError, "Hash segment program header not found in file " + elf_in_file_name
+raise RuntimeError("Hash segment program header not found in file " + elf_in_file_name)

 # Close files
 elf_in_fp.close()

@@ -1339,7 +1347,7 @@ def pboot_add_hash(env, elf_in_file_name,
 def image_auth(env, *args):

 if len(args) < 7 or len(args) > 8:
-raise RuntimeError, "Usage Invalid"
+raise RuntimeError("Usage Invalid")

 # Initialize File Names
 binary_in = args[0]

@@ -1369,7 +1377,7 @@ def image_auth(env, *args):
 num_certs = num_certs + 1

 if (num_certs == 0):
-raise RuntimeError, "Missing file(s) required for signing.\n"
+raise RuntimeError("Missing file(s) required for signing.\n")

 # Create the Certificate Chain
 concat_files (cert_chain_out, cert_list)

@@ -1383,7 +1391,7 @@ def image_auth(env, *args):
 pad_file(cert_fp, bytes_to_pad, PAD_BYTE_1)
 cert_fp.close()
 else:
-raise RuntimeError, "Certificate Size too large: " + str(cert_size)
+raise RuntimeError("Certificate Size too large: " + str(cert_size))

 # Create the Final Signed Image File
 concat_files (signed_image_out, [binary_in, signature, cert_chain_out])

@@ -1488,7 +1496,7 @@ def modify_elf_flags(env, elf_in_file_name,

 # Check for corresponding number of segments
 if len(segment_list) is not elf_header.e_phnum:
-raise RuntimeError, 'SCL file and ELF file have different number of segments!'
+raise RuntimeError('SCL file and ELF file have different number of segments!')

 # Go to the start of the p_flag entry in the first program header
 file_offset = elf_header.e_phoff + phdr_flag_off

@@ -1595,11 +1603,11 @@ def generate_code_hash(env, elf_in_file_name):
 (curr_phdr.p_flags & PH_PERM_MASK) == PH_PERM_RX and
 curr_pages == code_seg_pages):
 if (code_seg_idx != -1):
-raise RuntimeError, 'Multiple code segments match for: ' + code_seg_pages + ' pages'
+raise RuntimeError('Multiple code segments match for: ' + code_seg_pages + ' pages')
 code_seg_idx = i

 if (code_seg_idx == -1):
-raise RuntimeError, 'No matching code segment found'
+raise RuntimeError('No matching code segment found')

 code_phdr = phdr_table[code_seg_idx]


@@ -1673,7 +1681,7 @@ def readSCL(filename, global_dict):
 # Token 1: Segment Name
 # Token 2: Start Address -- not used in MBN tools
 if len(tokens) < 2:
-raise RuntimeError, 'SCL Segment Syntax malformed: ' + previous_line
+raise RuntimeError('SCL Segment Syntax malformed: ' + previous_line)

 # Get the segment flags corresponding to the segment name description
 new_scl_entry.flag = getSegmentFlag(tokens[0].strip(strip_chars))

@@ -1720,7 +1728,7 @@ def getSegmentFlag(seg_info):
 UNSECURE = "UNSECURE"

 if seg_info is None or len(seg_info) is 0:
-raise RuntimeError, 'Invalid segment information passed: ' + seg_info
+raise RuntimeError('Invalid segment information passed: ' + seg_info)

 # Conditional checks and assignments of the corresponding segment flag values
 if NOTPAGEABLE in seg_info:

@@ -1782,7 +1790,7 @@ def getSegmentFlag(seg_info):
 ret_val = MI_PBT_ELF_UNSECURE_SEGMENT

 else:
-raise RuntimeError, 'The segment name is wrongly defined in the SCL file: ' + seg_info
+raise RuntimeError('The segment name is wrongly defined in the SCL file: ' + seg_info)

 return ret_val


@@ -1793,7 +1801,7 @@ def getSegmentFlag(seg_info):
 def pad_file(fp, num_bytes, value):

 if num_bytes < 0:
-raise RuntimeError, "Number of bytes to pad must be greater than zero"
+raise RuntimeError("Number of bytes to pad must be greater than zero")

 while num_bytes > 0:
 fp.write('%c' % value)

@@ -1862,7 +1870,7 @@ def generate_global_dict(env):
 def populate_dictionary(*args):

 if len(args) < 1:
-raise RuntimeError, "At least 1 file must be specified as an input"
+raise RuntimeError("At least 1 file must be specified as an input")

 global_dict = {}
 Fields = ["Define", "Key", "Value"]

@@ -1915,11 +1923,11 @@ def filter_dictionary(env, global_dict, **kwargs):
 # Check for Image Type
 # If IMAGE_TYPE parameter is not provided, raise error
 if not kwargs.has_key('IMAGE_TYPE'):
-raise RuntimeError, "IMAGE_TYPE must be defined to use FilterDictionary."
+raise RuntimeError("IMAGE_TYPE must be defined to use FilterDictionary.")
 else:
 image_type = kwargs.get('IMAGE_TYPE')
 if type(image_type) is not str:
-raise RuntimeError, "IMAGE_TYPE must be of string type."
+raise RuntimeError("IMAGE_TYPE must be of string type.")

 # Check for Flash Type
 # If FLASH_TYPE parameter is not provided, default to 'nand'

@@ -1928,7 +1936,7 @@ def filter_dictionary(env, global_dict, **kwargs):
 else:
 flash_type = kwargs.get('FLASH_TYPE')
 if type(flash_type) is not str:
-raise RuntimeError, "FLASH_TYPE must be of string type. "
+raise RuntimeError("FLASH_TYPE must be of string type. ")

 # Check for MBN Type
 # If MBN_TYPE parameter is not provided, default to 'elf'

@@ -1937,7 +1945,7 @@ def filter_dictionary(env, global_dict, **kwargs):
 else:
 mbn_type = kwargs.get('MBN_TYPE')
 if mbn_type != 'elf' and mbn_type != 'bin':
-raise RuntimeError, "MBN_TYPE currently not supported: " + mbn_type
+raise RuntimeError("MBN_TYPE currently not supported: " + mbn_type)

 # Check for Image ID
 # If IMAGE_ID parameter is not provided, default to ID 0

@@ -1946,7 +1954,7 @@ def filter_dictionary(env, global_dict, **kwargs):
 else:
 image_id = kwargs.get('IMAGE_ID')
 if type(image_id) is not int:
-raise RuntimeError, "IMAGE_ID must be of integer type."
+raise RuntimeError("IMAGE_ID must be of integer type.")

 # Initialize
 gen_dict = {}

@@ -1971,9 +1979,9 @@ def filter_dictionary(env, global_dict, **kwargs):
 if template_key_match in global_dict:
 image_dest = global_dict[template_key_match]
 else:
-raise RuntimeError, "Builds file does not have IMAGE_KEY pair for: " + image_type
+raise RuntimeError("Builds file does not have IMAGE_KEY pair for: " + image_type)
 else:
-raise RuntimeError, "MBN_TYPE currently not supported: " + mbn_type
+raise RuntimeError("MBN_TYPE currently not supported: " + mbn_type)

 # Assign generic dictionary key/value pairs
 gen_dict['IMAGE_KEY_IMAGE_ID'] = id

@@ -2004,7 +2012,7 @@ def filter_dictionary(env, global_dict, **kwargs):
 gen_dict['IMAGE_KEY_OEM_NUM_ROOT_CERTS'] = oem_num_root_certs

 else:
-raise RuntimeError, "Invalid OEM root certificate configuration values"
+raise RuntimeError("Invalid OEM root certificate configuration values")

 # Assign additional dictionary key/values pair as needed by tools.


@@ -2034,7 +2042,7 @@ def preprocess_elf_file(elf_file_name):
 elf_header = Elf_Ehdr_common(elf_fp.read(ELF_HDR_COMMON_SIZE))

 if verify_elf_header(elf_header) is False:
-raise RuntimeError, "ELF file failed verification: " + elf_file_name
+raise RuntimeError("ELF file failed verification: " + elf_file_name)

 elf_fp.seek(0)


@@ -2047,7 +2055,7 @@ def preprocess_elf_file(elf_file_name):

 # Verify ELF header information
 if verify_elf_header(elf_header) is False:
-raise RuntimeError, "ELF file failed verification: " + elf_file_name
+raise RuntimeError("ELF file failed verification: " + elf_file_name)

 # Get program header size
 phdr_size = elf_header.e_phentsize

@@ -2097,17 +2105,26 @@ def get_hash_address(elf_file_name):
 # Verify ELF header contents from an input ELF file
 #----------------------------------------------------------------------------
 def verify_elf_header(elf_header):
-if (elf_header.e_ident[ELFINFO_MAG0_INDEX] != ELFINFO_MAG0) or \
+if (elf_header.e_ident[ELFINFO_MAG0_INDEX] != ELFINFO_MAG0):
-(elf_header.e_ident[ELFINFO_MAG1_INDEX] != ELFINFO_MAG1) or \
+print("MAG0[{:d}]\n".format((elf_header.e_ident[ELFINFO_MAG0_INDEX])))
-(elf_header.e_ident[ELFINFO_MAG2_INDEX] != ELFINFO_MAG2) or \
-(elf_header.e_ident[ELFINFO_MAG3_INDEX] != ELFINFO_MAG3) or \
-((elf_header.e_ident[ELFINFO_CLASS_INDEX] != ELFINFO_CLASS_64) and \
-(elf_header.e_ident[ELFINFO_CLASS_INDEX] != ELFINFO_CLASS_32)) or \
-(elf_header.e_ident[ELFINFO_VERSION_INDEX] != ELFINFO_VERSION_CURRENT):

 return False
-else:
+if (elf_header.e_ident[ELFINFO_MAG1_INDEX] != ELFINFO_MAG1):
-return True
+print("MAG1[{:d}]\n".format((elf_header.e_ident[ELFINFO_MAG1_INDEX])))
+return False
+if (elf_header.e_ident[ELFINFO_MAG2_INDEX] != ELFINFO_MAG2):
+print("MAG2[{:d}]\n".format((elf_header.e_ident[ELFINFO_MAG2_INDEX])))
+return False
+if (elf_header.e_ident[ELFINFO_MAG3_INDEX] != ELFINFO_MAG3):
+print("MAG3[{:d}]\n".format((elf_header.e_ident[ELFINFO_MAG3_INDEX])))
+return False
+if ((elf_header.e_ident[ELFINFO_CLASS_INDEX] != ELFINFO_CLASS_64) and \
+(elf_header.e_ident[ELFINFO_CLASS_INDEX] != ELFINFO_CLASS_32)):
+print("ELFINFO_CLASS_INDEX[{:d}]\n".format((elf_header.e_ident[ELFINFO_CLASS_INDEX])))
+return False
+if (elf_header.e_ident[ELFINFO_VERSION_INDEX] != ELFINFO_VERSION_CURRENT):
+print("ELFINFO_VERSION_INDEX[{:d}]\n".format((elf_header.e_ident[ELFINFO_VERSION_INDEX])))
+return False
+return True

 #----------------------------------------------------------------------------
 # Perform file copy given offsets and the number of bytes to copy

@@ -2159,9 +2176,9 @@ def initialize_hash_phdr(elf_in_file_name, hash_tbl_size, hdr_size, hdr_offset,

 # Update the hash table program header
 if is_elf64 is True:
-hash_Phdr = Elf64_Phdr('\0'*ELF64_PHDR_SIZE)
+hash_Phdr = Elf64_Phdr(b'\0'*ELF64_PHDR_SIZE)
 else:
-hash_Phdr = Elf32_Phdr('\0'*ELF32_PHDR_SIZE)
+hash_Phdr = Elf32_Phdr(b'\0'*ELF32_PHDR_SIZE)
 hash_Phdr.p_flags = MI_PBT_ELF_HASH_SEGMENT
 hash_Phdr.p_align = ELF_BLOCK_ALIGN
 hash_Phdr.p_offset = hash_hdr_offset

@@ -2243,7 +2260,7 @@ def OPEN(file_name, mode):
 try:
 fp = open(file_name, mode)
 except IOError:
-raise RuntimeError, "The file could not be opened: " + file_name
+raise RuntimeError("The file could not be opened: " + file_name)

 # File open has succeeded with the given mode, return the file object
 return fp
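The ELF magic constants above change from one-character strings ('\x7f', 'E', 'L', 'F') to the integers 127, 69, 76 and 70 for the same reason as the other bytes conversions: e_ident is now a bytes buffer, and indexing bytes in Python 3 yields an int rather than a one-character string, so integer constants are what the comparisons in verify_elf_header() actually see. A short standalone check (example ident bytes only):

    # e_ident prefix of a little-endian ELF file, as bytes (example values only)
    e_ident = b'\x7fELF\x01\x01\x01' + b'\x00' * 9

    ELFINFO_MAG0, ELFINFO_MAG1, ELFINFO_MAG2, ELFINFO_MAG3 = 127, 69, 76, 70  # 0x7F 'E' 'L' 'F'

    # Python 3: bytes[i] is an int, so the integer constants compare correctly.
    assert e_ident[0] == ELFINFO_MAG0
    assert [e_ident[i] for i in range(4)] == [ELFINFO_MAG0, ELFINFO_MAG1, ELFINFO_MAG2, ELFINFO_MAG3]
    print('magic ok')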
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 # Copyright (c) 2014, The Linux Foundation. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without

@@ -75,27 +75,27 @@ class NorSbl:
 self.verbose = verbose
 self.mbn_file_names = []
 if self.verbose:
-print 'Reading ' + sbl1
+print('Reading ' + sbl1)

 try:
 self.sbl1 = open(sbl1, 'rb').read()
 except IOError as e:
-print 'I/O error({0}): {1}'.format(e.errno, e.strerror)
+print('I/O error({0}): {1}'.format(e.errno, e.strerror))
 raise

 (codeword, magic, _) = struct.unpack_from(
 self.NOR_SBL1_HEADER, self.sbl1)

 if codeword != self.NOR_CODE_WORD:
-print '\n\nError: Unexpected Codeword!'
+print('\n\nError: Unexpected Codeword!')
-print 'Codeword : ' + ('0x%x' % self.NOR_CODE_WORD) + \
+print('Codeword : ' + ('0x%x' % self.NOR_CODE_WORD) + \
-' != ' + ('0x%x' % codeword)
+' != ' + ('0x%x' % codeword))
 sys.exit(-1)

 if magic != self.MAGIC_NUM:
-print '\n\nError: Unexpected Magic!'
+print('\n\nError: Unexpected Magic!')
-print 'Magic : ' + ('0x%x' % self.MAGIC_NUM) + \
+print('Magic : ' + ('0x%x' % self.MAGIC_NUM) + \
-' != ' + ('0x%x' % magic)
+' != ' + ('0x%x' % magic))
 sys.exit(-1)

 def Append(self, src):

@@ -119,10 +119,10 @@ class NorSbl:
 overflow = size % self.ALIGNMENT
 if overflow:
 pad_size = self.ALIGNMENT - overflow
-pad = '\377' * pad_size
+pad = b'\377' * pad_size
 outfile.write(pad)
 if self.verbose:
-print 'Added %d byte padding' % pad_size
+print('Added %d byte padding' % pad_size)
 return pad_size
 return 0

@@ -142,11 +142,11 @@ class NorSbl:

 for mbn_file_name in self.mbn_file_names:
 total_size += self.PadOutput(outfile, total_size)
-mbn_file_data = open(mbn_file_name, 'r').read()
+mbn_file_data = open(mbn_file_name, 'rb').read()
 outfile.write(mbn_file_data)
 if self.verbose:
-print 'Added %s (%d bytes)' % (mbn_file_name,
+print('Added %s (%d bytes)' % (mbn_file_name,
-len(mbn_file_data))
+len(mbn_file_data)))
 total_size += len(mbn_file_data)

 outfile.seek(28)

@@ -155,13 +155,13 @@ class NorSbl:


 def Usage(v):
-print '%s: [-v] [-h] [-o Output MBN] sbl1 sbl2 [bootblock]' % (
+print('%s: [-v] [-h] [-o Output MBN] sbl1 sbl2 [bootblock]' % (
-os.path.basename(sys.argv[0]))
+os.path.basename(sys.argv[0])))
-print
+print()
-print 'Concatenates up to three mbn files: two SBLs and a coreboot bootblock'
+print('Concatenates up to three mbn files: two SBLs and a coreboot bootblock')
-print ' -h This message'
+print(' -h This message')
-print ' -v verbose'
+print(' -v verbose')
-print ' -o Output file name, (default: %s)\n' % DEFAULT_OUTPUT_FILE_NAME
+print(' -o Output file name, (default: %s)\n' % DEFAULT_OUTPUT_FILE_NAME)
 sys.exit(v)

 def main():
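The 'r' to 'rb' change in the concatenation loop above matters in Python 3 because text mode decodes and newline-translates the input, which can reject or corrupt arbitrary binary data, and the resulting str could not be written alongside the b'\377' padding anyway. A rough sketch of the same pattern (hypothetical file names; the 4096-byte alignment is an assumption, not the script's actual ALIGNMENT value):

    # Hypothetical sketch: concatenate binary blobs with pad bytes in Python 3.
    ALIGNMENT = 4096  # assumed; the real script defines its own ALIGNMENT

    def append_padded(out, path):
        blob = open(path, 'rb').read()        # 'r' would decode as text and may fail
        overflow = out.tell() % ALIGNMENT
        if overflow:
            out.write(b'\377' * (ALIGNMENT - overflow))
        out.write(blob)

    with open('combined.mbn', 'wb') as out:   # hypothetical output name
        for name in ('sbl1.mbn', 'sbl2.mbn'): # hypothetical input names
            append_padded(out, name)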
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #============================================================================
 #
 #/** @file qgpt.py