Refactor
Some checks failed
Build Dusk / run-tests (push) Successful in 1m55s
Build Dusk / build-linux (push) Successful in 2m0s
Build Dusk / build-psp (push) Failing after 1m40s

2026-01-25 21:23:09 -06:00
parent d788de8637
commit 07afc3813a
35 changed files with 51 additions and 61 deletions


@@ -0,0 +1,33 @@
import sys
from tools.asset.process.image import processImage
from tools.asset.process.palette import processPalette
from tools.asset.process.tileset import processTileset
from tools.asset.process.map import processMap
from tools.asset.process.language import processLanguage
from tools.asset.process.script import processScript
processedAssets = []
def processAsset(asset):
if asset['path'] in processedAssets:
return
processedAssets.append(asset['path'])
    # Dispatch based on the declared asset type.
t = asset['type'].lower()
if t == 'palette':
return processPalette(asset)
elif t == 'image':
return processImage(asset)
elif t == 'tileset':
return processTileset(asset)
elif t == 'map':
return processMap(asset)
elif t == 'language':
return processLanguage(asset)
elif t == 'script':
return processScript(asset)
else:
print(f"Error: Unknown asset type '{asset['type']}' for path '{asset['path']}'")
sys.exit(1)
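# A minimal usage sketch, assuming the asset-dict shape consumed above
# ('path', 'type', and 'options' keys); the path and options here are
# hypothetical.
#
#   processAsset({
#       'path': 'assets/sprites/player.png',
#       'type': 'image',
#       'options': {'type': 'PALETTIZED'},
#   })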


@@ -0,0 +1,120 @@
import os
import sys
from PIL import Image
from tools.asset.process.palette import extractPaletteFromImage, palettes
from tools.asset.args import args
from tools.asset.path import getAssetRelativePath
from tools.asset.cache import assetGetCache, assetCache
images = []
def processImage(asset):
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
    imageType = asset['options'].get('type', 'PALETTIZED').upper()
    if imageType == 'PALETTIZED':
        return assetCache(asset['path'], processPalettizedImage(asset))
    elif imageType == 'ALPHA':
        return assetCache(asset['path'], processAlphaImage(asset))
    else:
        print(f"Error: Unknown image type {imageType} for asset {asset['path']}")
        sys.exit(1)
def processPalettizedImage(asset):
assetPath = asset['path']
cache = assetGetCache(assetPath)
if cache is not None:
return cache
image = Image.open(assetPath)
imagePalette = extractPaletteFromImage(image)
# Find palette that contains every color
for palette in palettes:
if all(color in palette['pixels'] for color in imagePalette):
break
    else:
        # No palette contains every color; fall back to the first palette so
        # the diagnostics below can name the missing colors.
        palette = palettes[0]
        print(f"Error: No matching palette found for {assetPath}!")
# Find which pixel is missing
for color in imagePalette:
if color in palette['pixels']:
continue
# Convert to hex (with alpha)
hexColor = '#{:02x}{:02x}{:02x}{:02x}'.format(color[0], color[1], color[2], color[3])
print(f"Missing color: {hexColor} in palette {palette['paletteName']}")
sys.exit(1)
print(f"Converting image {assetPath} to use palette")
paletteIndexes = []
for pixel in list(image.getdata()):
if pixel[3] == 0:
pixel = (0, 0, 0, 0)
paletteIndex = palette['pixels'].index(pixel)
paletteIndexes.append(paletteIndex)
data = bytearray()
data.extend(b"DPI") # Dusk Palettized Image
data.extend(image.width.to_bytes(4, 'little')) # Width
data.extend(image.height.to_bytes(4, 'little')) # Height
data.append(palette['paletteIndex']) # Palette index
    for paletteIndex in paletteIndexes:
        if paletteIndex > 255 or paletteIndex < 0:
            print(f"Error: Palette index {paletteIndex} is outside the 0-255 byte range!")
            sys.exit(1)
        data.append(paletteIndex) # Pixel index
relative = getAssetRelativePath(assetPath)
fileNameWithoutExt = os.path.splitext(os.path.basename(assetPath))[0]
outputFileRelative = os.path.join(os.path.dirname(relative), f"{fileNameWithoutExt}.dpi")
outputFilePath = os.path.join(args.output_assets, outputFileRelative)
os.makedirs(os.path.dirname(outputFilePath), exist_ok=True)
with open(outputFilePath, "wb") as f:
f.write(data)
outImage = {
"imagePath": outputFileRelative,
"files": [ outputFilePath ],
'width': image.width,
'height': image.height,
}
return assetCache(assetPath, outImage)
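# A minimal reader-side sketch for the DPI layout written above: a 3-byte
# magic, two little-endian uint32 dimensions, one palette-index byte, then
# one palette index per pixel. readDPI is a hypothetical helper, not used by
# the pipeline.
def readDPI(path):
    with open(path, 'rb') as f:
        raw = f.read()
    assert raw[0:3] == b"DPI", "Not a Dusk Palettized Image"
    width = int.from_bytes(raw[3:7], 'little')
    height = int.from_bytes(raw[7:11], 'little')
    paletteIndex = raw[11]
    pixelIndexes = raw[12:12 + width * height]
    return width, height, paletteIndex, pixelIndexes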
def processAlphaImage(asset):
assetPath = asset['path']
cache = assetGetCache(assetPath)
if cache is not None:
return cache
print(f"Processing alpha image: {assetPath}")
data = bytearray()
data.extend(b"DAI") # Dusk Alpha Image
image = Image.open(assetPath).convert("RGBA")
data.extend(image.width.to_bytes(4, 'little')) # Width
data.extend(image.height.to_bytes(4, 'little')) # Height
for pixel in list(image.getdata()):
# Only write alpha channel
        data.append(pixel[3]) # Pixel alpha
relative = getAssetRelativePath(assetPath)
fileNameWithoutExt = os.path.splitext(os.path.basename(assetPath))[0]
outputFileRelative = os.path.join(os.path.dirname(relative), f"{fileNameWithoutExt}.dai")
outputFilePath = os.path.join(args.output_assets, outputFileRelative)
os.makedirs(os.path.dirname(outputFilePath), exist_ok=True)
with open(outputFilePath, "wb") as f:
f.write(data)
outImage = {
"imagePath": outputFileRelative,
"files": [ outputFilePath ],
'width': image.width,
'height': image.height,
}
return assetCache(assetPath, outImage)


@@ -0,0 +1,193 @@
import sys
import os
from tools.asset.args import args
from tools.asset.cache import assetCache, assetGetCache
from tools.asset.path import getAssetRelativePath
from tools.dusk.defs import defs
import polib
import re
LANGUAGE_CHUNK_CHAR_COUNT = int(defs.get('ASSET_LANG_CHUNK_CHAR_COUNT'))
LANGUAGE_DATA = {}
LANGUAGE_KEYS = []
def processLanguageList():
# Language keys header data
headerKeys = "// Auto-generated language keys header file.\n"
headerKeys += "#pragma once\n"
headerKeys += "#include \"dusk.h\"\n\n"
    # This is the desired chunk groups list. If a language key STARTS with any
    # of the keys in this list we would "like to" put it in that chunk group.
    # If there is no match, or the chunk is full, then we add the key to the
    # next available chunk group (one that isn't a 'desired' one). If a chunk
    # becomes full, we attempt to start another chunk with the same prefix so
    # that a second batching can occur.
desiredChunkGroups = {
'ui': 0
}
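    # For example (hypothetical keys): 'ui.menu.start' starts with 'ui', so it
    # is pinned to chunk 0, while 'npc.blacksmith.greeting' matches no desired
    # prefix and lands in the next free chunk, numbered from
    # max(desiredChunkGroups.values()) + 1.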
# Now, for each language key, create the header reference and index.
keyIndex = 0
languageKeyIndexes = {}
languageKeyChunk = {}
languageKeyChunkIndexes = {}
languageKeyChunkOffsets = {}
for key in LANGUAGE_KEYS:
headerKeys += f"#define {getLanguageVariableName(key)} {keyIndex}\n"
languageKeyIndexes[key] = keyIndex
keyIndex += 1
# Find desired chunk group
assignedChunk = None
for desiredKey in desiredChunkGroups:
if key.lower().startswith(desiredKey):
assignedChunk = desiredChunkGroups[desiredKey]
break
# If no desired chunk group matched, assign to -1
if assignedChunk is None:
assignedChunk = -1
languageKeyChunk[key] = assignedChunk
        # Validate that every language has a translation for this key.
        for lang in LANGUAGE_DATA:
            if key not in LANGUAGE_DATA[lang]:
                print(f"Error: Missing translation for key '{key}' in language '{lang}'")
                sys.exit(1)
# Seal the header.
headerKeys += f"\n#define LANG_KEY_COUNT {len(LANGUAGE_KEYS)}\n"
# Now we can generate the language string chunks.
nextChunkIndex = max(desiredChunkGroups.values()) + 1
files = []
for lang in LANGUAGE_DATA:
langData = LANGUAGE_DATA[lang]
# Key = chunkIndex, value = chunkInfo
languageChunks = {}
for key in LANGUAGE_KEYS:
keyIndex = languageKeyIndexes[key]
chunkIndex = languageKeyChunk[key]
wasSetChunk = chunkIndex != -1
# This will keep looping until we find a chunk
while True:
# Determine the next chunkIndex IF chunkIndex is -1
if chunkIndex == -1:
chunkIndex = nextChunkIndex
                # Is the chunk full? Lengths are measured in encoded bytes so
                # that the offsets written below match the UTF-8 payload.
                curLen = languageChunks.get(chunkIndex, {'len': 0})['len']
                newLen = curLen + len(langData[key].encode('utf-8'))
                if newLen > LANGUAGE_CHUNK_CHAR_COUNT:
# Chunk is full, need to create a new chunk.
chunkIndex = -1
if wasSetChunk:
wasSetChunk = False
else:
nextChunkIndex += 1
continue
# Chunk is not full, we can use it.
if chunkIndex not in languageChunks:
languageChunks[chunkIndex] = {
'len': 0,
'keys': []
}
languageChunks[chunkIndex]['len'] = newLen
languageChunks[chunkIndex]['keys'].append(key)
languageKeyChunkIndexes[key] = chunkIndex
languageKeyChunkOffsets[key] = curLen
break
# We have now chunked all the keys for this language!
langBuffer = b""
# Write header info
langBuffer += b'DLF' # Dusk Language File
for key in LANGUAGE_KEYS:
# Write the chunk that this key belongs to as uint32_t
chunkIndex = languageKeyChunkIndexes[key]
langBuffer += chunkIndex.to_bytes(4, byteorder='little')
# Write the offset for this key as uint32_t
offset = languageKeyChunkOffsets[key]
langBuffer += offset.to_bytes(4, byteorder='little')
# Write the length of the string as uint32_t
strData = langData[key].encode('utf-8')
langBuffer += len(strData).to_bytes(4, byteorder='little')
# Now write out each chunk's string data, packed tight and no null term.
for chunkIndex in sorted(languageChunks.keys()):
chunkInfo = languageChunks[chunkIndex]
for key in chunkInfo['keys']:
strData = langData[key].encode('utf-8')
langBuffer += strData
# Now pad the chunk to full size
curLen = chunkInfo['len']
if curLen < LANGUAGE_CHUNK_CHAR_COUNT:
padSize = LANGUAGE_CHUNK_CHAR_COUNT - curLen
langBuffer += b'\0' * padSize
# Write out the language data file
outputFile = os.path.join(args.output_assets, "language", f"{lang}.dlf")
files.append(outputFile)
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, "wb") as f:
f.write(langBuffer)
# Write out the language keys header file
outputFile = os.path.join(args.headers_dir, "locale", "language", "keys.h")
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, "w") as f:
f.write(headerKeys)
return {
'files': files
}
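# A minimal reader-side sketch of the DLF layout written above: a 3-byte
# magic, then one 12-byte entry per key (three little-endian uint32s: chunk
# index, byte offset within the chunk, string byte length), then the
# fixed-size chunk payloads. readDLFString is a hypothetical helper; keyCount
# corresponds to LANG_KEY_COUNT from the generated header, and chunk indices
# are assumed to be contiguous from zero.
def readDLFString(raw, keyIndex, keyCount, chunkSize):
    import struct
    chunkIndex, offset, length = struct.unpack_from('<III', raw, 3 + keyIndex * 12)
    chunksStart = 3 + keyCount * 12  # Chunk payloads follow the key table
    start = chunksStart + chunkIndex * chunkSize + offset
    return raw[start:start + length].decode('utf-8')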
def getLanguageVariableName(languageKey):
# Take the language key, prepend LANG_, uppercase, replace any non symbols
# with _
key = languageKey.strip().upper()
key = re.sub(r'[^A-Z0-9]', '_', key)
return f"LANG_{key}"
def processLanguage(asset):
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
# Load PO File
po = polib.pofile(asset['path'])
    langName = po.metadata.get('Language')
    if not langName:
        print(f"Error: PO file {asset['path']} is missing the 'Language' metadata entry")
        sys.exit(1)
if langName not in LANGUAGE_DATA:
LANGUAGE_DATA[langName] = {}
for entry in po:
key = entry.msgid
val = entry.msgstr
if key not in LANGUAGE_KEYS:
LANGUAGE_KEYS.append(key)
if key not in LANGUAGE_DATA[langName]:
LANGUAGE_DATA[langName][key] = val
else:
print(f"Error: Duplicate translation key '{key}' in language '{langName}'")
sys.exit(1)
outLanguageData = {
'data': po,
'path': asset['path'],
'files': []
}
return assetCache(asset['path'], outLanguageData)

tools/asset/process/map.py

@@ -0,0 +1,154 @@
import struct
import sys
import os
from tools.asset.args import args
from tools.asset.cache import assetCache, assetGetCache
from tools.asset.path import getAssetRelativePath
from tools.dusk.map import Map
from tools.dusk.chunk import Chunk
def convertModelData(modelData):
    # Model data stores geometry efficiently with shared indices, but we
    # buffer it out to flat six-vertex quads for simplicity at runtime.
outVertices = []
outUVs = []
outColors = []
    for index in modelData['indices']:
        vertex = modelData['vertices'][index]
        uv = modelData['uvs'][index]
        color = modelData['colors'][index]
outVertices.append(vertex)
outUVs.append(uv)
outColors.append(color)
return {
'vertices': outVertices,
'uvs': outUVs,
'colors': outColors
}
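# For example, an indexed quad with vertices [A, B, C, D] and indices
# [0, 1, 2, 2, 1, 3] expands to the six flat vertices [A, B, C, C, B, D]
# (two triangles), trading a little memory for a simpler runtime format.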
def processChunk(chunk):
cache = assetGetCache(chunk.getFilename())
if cache:
return cache
baseModel = {
'vertices': [],
'colors': [],
'uvs': []
}
models = [ baseModel ]
    for tile in chunk.tiles.values():
tileBase = tile.getBaseTileModel()
convertedBase = convertModelData(tileBase)
baseModel['vertices'].extend(convertedBase['vertices'])
baseModel['colors'].extend(convertedBase['colors'])
baseModel['uvs'].extend(convertedBase['uvs'])
# Generate binary buffer for efficient output
buffer = bytearray()
    buffer.extend(b'DCF')  # Header (Dusk Chunk File)
buffer.extend(len(chunk.tiles).to_bytes(4, 'little')) # Number of tiles
buffer.extend(len(models).to_bytes(1, 'little')) # Number of models
buffer.extend(len(chunk.entities).to_bytes(1, 'little')) # Number of entities
# Buffer tile data as array of uint8_t
    for tile in chunk.tiles.values():
        buffer.extend(tile.shape.to_bytes(1, 'little'))
    # For each model
for model in models:
vertexCount = len(model['vertices'])
buffer.extend(vertexCount.to_bytes(4, 'little'))
for i in range(vertexCount):
vertex = model['vertices'][i]
uv = model['uvs'][i]
color = model['colors'][i]
buffer.extend(color[0].to_bytes(1, 'little'))
buffer.extend(color[1].to_bytes(1, 'little'))
buffer.extend(color[2].to_bytes(1, 'little'))
buffer.extend(color[3].to_bytes(1, 'little'))
buffer.extend(bytearray(struct.pack('<f', uv[0])))
buffer.extend(bytearray(struct.pack('<f', uv[1])))
buffer.extend(bytearray(struct.pack('<f', vertex[0])))
buffer.extend(bytearray(struct.pack('<f', vertex[1])))
buffer.extend(bytearray(struct.pack('<f', vertex[2])))
# For each entity
for entity in chunk.entities.values():
buffer.extend(entity.type.to_bytes(1, 'little'))
buffer.extend(entity.localX.to_bytes(1, 'little'))
buffer.extend(entity.localY.to_bytes(1, 'little'))
buffer.extend(entity.localZ.to_bytes(1, 'little'))
# Write out map file
relative = getAssetRelativePath(chunk.getFilename())
fileNameWithoutExt = os.path.splitext(os.path.basename(relative))[0]
outputFileRelative = os.path.join(os.path.dirname(relative), f"{fileNameWithoutExt}.dcf")
outputFilePath = os.path.join(args.output_assets, outputFileRelative)
os.makedirs(os.path.dirname(outputFilePath), exist_ok=True)
with open(outputFilePath, "wb") as f:
f.write(buffer)
outChunk = {
'files': [ outputFilePath ],
'chunk': chunk
}
return assetCache(chunk.getFilename(), outChunk)
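# A minimal sketch of how one packed vertex record above could be decoded:
# each record is 4 color bytes, 2 UV floats, then 3 position floats
# (24 bytes, little-endian). parseVertexRecord is a hypothetical helper,
# not used by the pipeline.
def parseVertexRecord(buffer, recordOffset):
    r, g, b, a, u, v, x, y, z = struct.unpack_from('<4B2f3f', buffer, recordOffset)
    return { 'color': (r, g, b, a), 'uv': (u, v), 'vertex': (x, y, z) }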
def processMap(asset):
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
    duskMap = Map(None)
    duskMap.load(asset['path'])
    chunksDir = duskMap.getChunkDirectory()
files = os.listdir(chunksDir)
if len(files) == 0:
print(f"Error: No chunk files found in {chunksDir}.")
sys.exit(1)
chunkFiles = []
for fileName in files:
if not fileName.endswith('.json'):
continue
fNameNoExt = os.path.splitext(fileName)[0]
fnPieces = fNameNoExt.split('_')
if len(fnPieces) != 3:
print(f"Error: Chunk filename {fileName} does not contain valid chunk coordinates.")
sys.exit(1)
        chunk = Chunk(duskMap, int(fnPieces[0]), int(fnPieces[1]), int(fnPieces[2]))
chunk.load()
result = processChunk(chunk)
chunkFiles.extend(result['files'])
    # Build the DMF (Dusk Map File) buffer.
    outBuffer = bytearray()
    outBuffer.extend(b'DMF')
    outBuffer.extend(len(chunkFiles).to_bytes(4, 'little'))
fileRelative = getAssetRelativePath(asset['path'])
fileNameWithoutExt = os.path.splitext(os.path.basename(fileRelative))[0]
outputMapRelative = os.path.join(os.path.dirname(fileRelative), f"{fileNameWithoutExt}.dmf")
outputMapPath = os.path.join(args.output_assets, outputMapRelative)
os.makedirs(os.path.dirname(outputMapPath), exist_ok=True)
with open(outputMapPath, "wb") as f:
f.write(outBuffer)
    outMap = {
        'files': chunkFiles + [outputMapPath]
    }
return assetCache(asset['path'], outMap)


@@ -0,0 +1,94 @@
import os
from PIL import Image
import datetime
from tools.asset.args import args
from tools.asset.cache import assetCache, assetGetCache
palettes = []
def extractPaletteFromImage(image):
    # Walk the image and collect every unique color, in first-seen order.
if image.mode != 'RGBA':
image = image.convert('RGBA')
pixels = list(image.getdata())
uniqueColors = []
for color in pixels:
# We treat all alpha 0 as rgba(0,0,0,0) for palette purposes
if color[3] == 0:
color = (0, 0, 0, 0)
if color not in uniqueColors:
uniqueColors.append(color)
return uniqueColors
def processPalette(asset):
print(f"Processing palette: {asset['path']}")
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
paletteIndex = len(palettes)
image = Image.open(asset['path'])
pixels = extractPaletteFromImage(image)
fileNameWithoutExt = os.path.splitext(os.path.basename(asset['path']))[0]
fileNameWithoutPalette = os.path.splitext(fileNameWithoutExt)[0]
# PSP requires that the palette size be a power of two, so we will pad the
# palette with transparent colors if needed.
def mathNextPowTwo(x):
return 1 << (x - 1).bit_length()
nextPowTwo = mathNextPowTwo(len(pixels))
while len(pixels) < nextPowTwo:
pixels.append((0, 0, 0, 0))
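    # For example: a 5-color palette pads to 8 entries, 16 stays 16, and 17
    # pads to 32; 1 << (x - 1).bit_length() rounds x up to the next power of
    # two and leaves exact powers of two unchanged.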
# Header
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data = f"// Palette Generated for {asset['path']} at {now}\n"
data += f"#include \"display/palette/palette.h\"\n\n"
data += f"#define PALETTE_{paletteIndex}_COLOR_COUNT {len(pixels)}\n\n"
data += f"#pragma pack(push, 1)\n"
data += f"static const color_t PALETTE_{paletteIndex}_COLORS[PALETTE_{paletteIndex}_COLOR_COUNT] = {{\n"
for pixel in pixels:
data += f" {{ 0x{pixel[0]:02X}, 0x{pixel[1]:02X}, 0x{pixel[2]:02X}, 0x{pixel[3]:02X} }},\n"
data += f"}};\n"
data += f"#pragma pack(pop)\n\n"
data += f"static const palette_t PALETTE_{paletteIndex} = {{\n"
data += f" .colorCount = PALETTE_{paletteIndex}_COLOR_COUNT,\n"
data += f" .colors = PALETTE_{paletteIndex}_COLORS,\n"
data += f"}};\n"
# Write Header
outputFile = os.path.join(args.headers_dir, "display", "palette", f"palette_{paletteIndex}.h")
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, "w") as f:
f.write(data)
palette = {
"paletteIndex": paletteIndex,
"paletteName": fileNameWithoutPalette,
"pixels": pixels,
"headerFile": os.path.relpath(outputFile, args.headers_dir),
"asset": asset,
"files": [ ],# No zippable files.
}
palettes.append(palette)
return assetCache(asset['path'], palette)
def processPaletteList():
data = f"// Auto-generated palette list\n"
for palette in palettes:
data += f"#include \"{palette['headerFile']}\"\n"
data += f"\n"
data += f"#define PALETTE_LIST_COUNT {len(palettes)}\n\n"
data += f"static const palette_t* PALETTE_LIST[PALETTE_LIST_COUNT] = {{\n"
for palette in palettes:
data += f" &PALETTE_{palette['paletteIndex']},\n"
data += f"}};\n"
# Write the palette list to a header file
outputFile = os.path.join(args.headers_dir, "display", "palette", "palettelist.h")
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, "w") as f:
f.write(data)


@@ -0,0 +1,43 @@
import sys
import os
from tools.asset.args import args
from tools.asset.cache import assetCache, assetGetCache
from tools.asset.path import getAssetRelativePath
from tools.dusk.defs import fileDefs
def processScript(asset):
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
# Load the lua file as a string
with open(asset['path'], 'r', encoding='utf-8') as f:
luaCode = f.read()
# TODO: I will precompile or minify the Lua code here in the future
# Replace all definitions in the code
for key, val in fileDefs.items():
luaCode = luaCode.replace(key, str(val))
# Create output Dusk Script File (DSF) data
data = ""
data += "DSF"
data += luaCode
# Write to relative output file path.
relative = getAssetRelativePath(asset['path'])
fileNameWithoutExt = os.path.splitext(os.path.basename(asset['path']))[0]
outputFileRelative = os.path.join(os.path.dirname(relative), f"{fileNameWithoutExt}.dsf")
outputFilePath = os.path.join(args.output_assets, outputFileRelative)
os.makedirs(os.path.dirname(outputFilePath), exist_ok=True)
with open(outputFilePath, "wb") as f:
f.write(data.encode('utf-8'))
outScript = {
'data': data,
'path': asset['path'],
'files': [ outputFilePath ],
'scriptPath': outputFileRelative,
}
return assetCache(asset['path'], outScript)
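# A minimal reader-side sketch for the DSF layout written above: a 3-byte
# "DSF" magic followed directly by the UTF-8 Lua source. readDSF is a
# hypothetical helper, not used by the pipeline.
def readDSF(path):
    with open(path, 'rb') as f:
        raw = f.read()
    assert raw[0:3] == b"DSF", "Not a Dusk Script File"
    return raw[3:].decode('utf-8')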


@@ -0,0 +1,177 @@
import json
import sys
import os
import datetime
from xml.etree import ElementTree
from tools.asset.process.image import processImage
from tools.asset.path import getAssetRelativePath
from tools.asset.args import args
from tools.asset.cache import assetGetCache, assetCache
tilesets = []
def loadTilesetFromTSX(asset):
# Load the TSX file
tree = ElementTree.parse(asset['path'])
root = tree.getroot()
# Expect tileheight, tilewidth, columns and tilecount attributes
    requiredAttribs = ('tilewidth', 'tileheight', 'columns', 'tilecount')
    if any(attrib not in root.attrib for attrib in requiredAttribs):
print(f"Error: TSX file {asset['path']} is missing required attributes (tilewidth, tileheight, columns, tilecount)")
sys.exit(1)
tileWidth = int(root.attrib['tilewidth'])
tileHeight = int(root.attrib['tileheight'])
columns = int(root.attrib['columns'])
tileCount = int(root.attrib['tilecount'])
rows = (tileCount + columns - 1) // columns # Calculate rows based on tileCount and columns
# Find the image element
imageElement = root.find('image')
if imageElement is None or 'source' not in imageElement.attrib:
print(f"Error: TSX file {asset['path']} is missing an image element with a source attribute")
sys.exit(1)
imagePath = imageElement.attrib['source']
# Image is relative to the TSX file
imageAssetPath = os.path.join(os.path.dirname(asset['path']), imagePath)
image = processImage({
'path': imageAssetPath,
'options': asset['options'],
})
return {
"image": image,
"tileWidth": tileWidth,
"tileHeight": tileHeight,
"columns": columns,
"rows": rows,
"originalWidth": tileWidth * columns,
"originalHeight": tileHeight * rows,
}
def loadTilesetFromArgs(asset):
# We need to determine how big each tile is. This can either be provided as
# an arg of tileWidth/tileHeight or as a count of rows/columns.
# Additionally, if the image has been factored, then the user can provide both
# tile sizes AND cols/rows to indicate the original size of the image.
image = processImage(asset)
tileWidth, tileHeight = None, None
columns, rows = None, None
originalWidth, originalHeight = image['width'], image['height']
if 'tileWidth' in asset['options'] and 'columns' in asset['options']:
tileWidth = int(asset['options']['tileWidth'])
columns = int(asset['options']['columns'])
originalWidth = tileWidth * columns
elif 'tileWidth' in asset['options']:
tileWidth = int(asset['options']['tileWidth'])
columns = image['width'] // tileWidth
elif 'columns' in asset['options']:
columns = int(asset['options']['columns'])
tileWidth = image['width'] // columns
else:
print(f"Error: Tileset {asset['path']} must specify either tileWidth or columns")
sys.exit(1)
if 'tileHeight' in asset['options'] and 'rows' in asset['options']:
tileHeight = int(asset['options']['tileHeight'])
rows = int(asset['options']['rows'])
originalHeight = tileHeight * rows
elif 'tileHeight' in asset['options']:
tileHeight = int(asset['options']['tileHeight'])
rows = image['height'] // tileHeight
elif 'rows' in asset['options']:
rows = int(asset['options']['rows'])
tileHeight = image['height'] // rows
else:
print(f"Error: Tileset {asset['path']} must specify either tileHeight or rows")
sys.exit(1)
return {
"image": image,
"tileWidth": tileWidth,
"tileHeight": tileHeight,
"columns": columns,
"rows": rows,
"originalWidth": originalWidth,
"originalHeight": originalHeight,
}
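# For example (hypothetical asset): a 128x64 image with options
# {'tileWidth': 16, 'tileHeight': 16} yields columns=8 and rows=4; supplying
# {'tileWidth': 16, 'columns': 16} instead declares the image was factored
# down from an original 256px-wide sheet (originalWidth = 16 * 16 = 256).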
def processTileset(asset):
cache = assetGetCache(asset['path'])
if cache is not None:
return cache
print(f"Processing tileset: {asset['path']}")
tilesetData = None
if asset['path'].endswith('.tsx'):
tilesetData = loadTilesetFromTSX(asset)
else:
tilesetData = loadTilesetFromArgs(asset)
fileNameWithoutExtension = os.path.splitext(os.path.basename(asset['path']))[0]
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
tilesetName = fileNameWithoutExtension
tilesetNameUpper = tilesetName.upper()
widthScale = tilesetData['originalWidth'] / tilesetData['image']['width']
heightScale = tilesetData['originalHeight'] / tilesetData['image']['height']
# Create header
data = f"// Tileset Generated for {asset['path']} at {now}\n"
data += f"#pragma once\n"
data += f"#include \"display/tileset/tileset.h\"\n\n"
data += f"static const tileset_t TILESET_{tilesetNameUpper} = {{\n"
data += f" .tileWidth = {tilesetData['tileWidth']},\n"
data += f" .tileHeight = {tilesetData['tileHeight']},\n"
data += f" .tileCount = {tilesetData['columns'] * tilesetData['rows']},\n"
data += f" .columns = {tilesetData['columns']},\n"
data += f" .rows = {tilesetData['rows']},\n"
data += f" .uv = {{ {widthScale / tilesetData['columns']}f, {heightScale / tilesetData['rows']}f }},\n"
data += f" .image = {json.dumps(tilesetData['image']['imagePath'])},\n"
data += f"}};\n"
# Write Header
outputFile = os.path.join(args.headers_dir, "display", "tileset", f"tileset_{tilesetName}.h")
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, 'w') as f:
f.write(data)
print(f"Write header for tileset: {outputFile}")
    tileset = {
        "image": tilesetData['image'],
        "headerFile": os.path.relpath(outputFile, args.headers_dir),
        "tilesetName": tilesetName,
        "tilesetNameUpper": tilesetNameUpper,
        "tilesetIndex": len(tilesets),
        "tilesetData": tilesetData,
        "files": tilesetData['image']['files'],
    }
tilesets.append(tileset)
return assetCache(asset['path'], tileset)
def processTilesetList():
data = f"// Tileset List Generated at {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
data += f"#pragma once\n"
for tileset in tilesets:
data += f"#include \"{tileset['headerFile']}\"\n"
data += f"\n"
data += f"#define TILESET_LIST_COUNT {len(tilesets)}\n\n"
data += f"static const tileset_t* TILESET_LIST[TILESET_LIST_COUNT] = {{\n"
for tileset in tilesets:
data += f" &TILESET_{tileset['tilesetNameUpper']},\n"
data += f"}};\n"
# Write header.
outputFile = os.path.join(args.headers_dir, "display", "tileset", f"tilesetlist.h")
os.makedirs(os.path.dirname(outputFile), exist_ok=True)
with open(outputFile, 'w') as f:
f.write(data)