diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
index 0000000..b0f3cd7
--- /dev/null
+++ b/.github/workflows/docker.yml
@@ -0,0 +1,70 @@
+name: Build and push Docker image
+
+# Cancel superseded in-flight runs: without this, two rapid pushes race and
+# the older build can overwrite the newer image's "latest" tag.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+on:
+  push:
+    branches: [skp2blend]
+    paths:
+      - "skp2blend/**"
+      - ".github/workflows/docker.yml"
+  workflow_dispatch:
+
+env:
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}/skp2blend
+  SDK_RELEASE_URL: https://github.com/RedHaloStudio/Sketchup_Importer/releases/download/0.27.0/sketchup_importer-0.27.zip
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Download SketchUp SDK from upstream release
+        run: |
+          wget -qO /tmp/sdk.zip "$SDK_RELEASE_URL"
+          unzip -q /tmp/sdk.zip -d /tmp/sdk
+
+          # Place DLLs where the Dockerfile expects them
+          mkdir -p skp2blend/sketchup_sdk/binaries/sketchup/x64
+          cp /tmp/sdk/sketchup_importer/SketchUpAPI.dll \
+             /tmp/sdk/sketchup_importer/SketchUpCommonPreferences.dll \
+             skp2blend/sketchup_sdk/binaries/sketchup/x64/
+
+          # Place the Python 3.11 compiled extension
+          cp /tmp/sdk/sketchup_importer/sketchup.cp311-win_amd64.pyd \
+             skp2blend/sketchup.pyd
+
+          rm -rf /tmp/sdk /tmp/sdk.zip
+
+      - name: Log in to GHCR
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=sha,prefix=
+            type=raw,value=latest
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v6
+        with:
+          context: skp2blend
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
diff --git a/skp2blend/.dockerignore b/skp2blend/.dockerignore
new file mode 100644
index 0000000..8d35cb3
--- /dev/null
+++ 
b/skp2blend/.dockerignore
@@ -0,0 +1,2 @@
+__pycache__
+*.pyc
diff --git a/skp2blend/Dockerfile b/skp2blend/Dockerfile
new file mode 100644
index 0000000..c28685b
--- /dev/null
+++ b/skp2blend/Dockerfile
@@ -0,0 +1,76 @@
+# ===========================================================================
+# Two-stage SKP-to-Blend converter — Docker image
+#
+# Build-time prerequisites (not redistributable — user must supply):
+#   sketchup_sdk/          SketchUp C SDK for Windows x64
+#     binaries/sketchup/x64/SketchUpAPI.dll (+ companion DLLs)
+#   sketchup.pyd           Compiled Cython extension for Windows Python 3.11 x64
+#                          (built via: python setup.py build_ext --inplace)
+#
+# Build:
+#   docker build -t skp2blend .
+#
+# Run:
+#   docker run --rm -v /path/to/files:/data skp2blend /data/model.skp /data/model.blend
+# ===========================================================================
+
+FROM scottyhardy/docker-wine:stable-11.0
+
+USER root
+ENV DEBIAN_FRONTEND=noninteractive
+
+# ── Extra packages not in the base image ─────────────────────────────────
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    wget ca-certificates unzip xz-utils \
+    libxi6 libxrender1 libxfixes3 libglib2.0-0 libxkbcommon0 \
+    libgl1 libsm6 \
+    && rm -rf /var/lib/apt/lists/*
+
+# ── Wine prefix init ────────────────────────────────────────────────────
+ENV WINEDEBUG=-all
+ENV WINEPREFIX=/root/.wine
+ENV WINEARCH=win64
+
+RUN xvfb-run -a wine wineboot --init && wineserver -w
+
+# ── Windows Python 3.11 (embeddable) ───────────────────────────────────
+RUN wget -qO /tmp/python-embed.zip \
+    "https://www.python.org/ftp/python/3.11.9/python-3.11.9-embed-amd64.zip" && \
+    mkdir -p "${WINEPREFIX}/drive_c/Python311" && \
+    cd "${WINEPREFIX}/drive_c/Python311" && \
+    unzip /tmp/python-embed.zip && \
+    rm /tmp/python-embed.zip
+
+# Enable import of .py files next to the embedded Python
+RUN sed -i 's/^#import site/import site/' \
+    "${WINEPREFIX}/drive_c/Python311/python311._pth"
+
+# ── SketchUp SDK + compiled extension ──────────────────────────────────
+COPY sketchup_sdk/binaries/sketchup/x64/*.dll "${WINEPREFIX}/drive_c/Python311/"
+COPY sketchup.pyd "${WINEPREFIX}/drive_c/Python311/"
+
+# ── Blender (headless) ─────────────────────────────────────────────────
+ARG BLENDER_VERSION=5.0.1
+ARG BLENDER_URL=https://download.blender.org/release/Blender5.0/blender-${BLENDER_VERSION}-linux-x64.tar.xz
+RUN wget -qO /tmp/blender.tar.xz "${BLENDER_URL}" && \
+    mkdir -p /opt/blender && \
+    tar -xf /tmp/blender.tar.xz -C /opt/blender --strip-components=1 && \
+    rm /tmp/blender.tar.xz && \
+    ln -s /opt/blender/blender /usr/local/bin/blender
+
+# ── Converter scripts ──────────────────────────────────────────────────
+COPY intermediate.py skputil.py skp_extractor.py blend_builder.py obj_builder.py \
+    render_preview.py cli.py \
+    /opt/skp2blend/
+
+# Also copy the Python modules into the Wine Python directory so the
+# extractor can import them
+RUN cp /opt/skp2blend/intermediate.py /opt/skp2blend/skputil.py \
+    "${WINEPREFIX}/drive_c/Python311/"
+
+ENV PATH="/opt/skp2blend:${PATH}"
+
+ENTRYPOINT ["python3", "/opt/skp2blend/cli.py", \
+    "--blender", "/usr/local/bin/blender", \
+    "--wine-python", "C:\\Python311\\python.exe"]
diff --git a/skp2blend/Makefile b/skp2blend/Makefile
new file mode 100644
index 0000000..3ddb7f4
--- /dev/null
+++ b/skp2blend/Makefile
@@ -0,0 +1,5 @@
+#! make
+
+.PHONY: build
+build:
+	docker build -t skp2blend .
diff --git a/skp2blend/README.md b/skp2blend/README.md
new file mode 100644
index 0000000..24c7123
--- /dev/null
+++ b/skp2blend/README.md
@@ -0,0 +1,147 @@
+# skp2blend
+
+Convert SketchUp `.skp` files to Blender `.blend` (and optionally Wavefront `.obj`) on Linux amd64 — no SketchUp installation required.
+
+## Motivation
+
+The SketchUp C SDK and its Python bindings (`sketchup.pyd`) are Windows-only. This makes batch-converting `.skp` files on Linux servers or CI pipelines impossible without a Windows machine.
skp2blend solves this by packaging everything into a single Docker image: + +- **Wine** runs the Windows Python interpreter and SketchUp SDK to read `.skp` files +- **Blender headless** builds the `.blend` output with full material, texture, camera, and hierarchy support +- **Pure-Python OBJ export** provides a lightweight alternative output format with no Blender dependency + +The result is a self-contained CLI tool that runs on any Linux amd64 host with Docker. + +## Quick start + +```bash +# Using the convenience wrapper (builds/pulls the Docker image as "skp2blend") +./convert.sh model.skp model.blend + +# Produce both .blend and .obj +./convert.sh model.skp model.blend --also-obj + +# OBJ only (faster — skips Blender entirely) +./convert.sh model.skp model.blend --obj-only +``` + +Or run the Docker image directly: + +```bash +docker run --rm \ + -v "$(pwd):/data" \ + skp2blend \ + /data/model.skp /data/model.blend +``` + +## How it works + +Conversion runs in two stages: + +1. **Stage 1 — Extract** (`skp_extractor.py`, runs under Wine) + Reads the `.skp` file via the SketchUp C SDK and writes a portable `intermediate.json` plus extracted texture files to a work directory. + +2. **Stage 2 — Build .blend** (`blend_builder.py`, runs inside Blender headless) + Reads `intermediate.json` and constructs the Blender scene: materials with Principled BSDF nodes, UV-mapped textures, cameras, the full group/component hierarchy, and deduplicated instancing for repeated components. + +3. **Stage 2b — Build .obj** (`obj_builder.py`, pure Python, optional) + Reads the same `intermediate.json`, flattens the entity tree into world-space geometry, and writes `.obj` + `.mtl` files with texture references. Useful for side-by-side comparison with the `.blend` output or as an archival format. 
+ +## CLI options + +| Flag | Description | +|---|---| +| `--scene NAME` | Import a specific named SketchUp scene (applies layer visibility and camera) | +| `--max-instance N` | Instancing threshold — components appearing N+ times are deduplicated (default: 1) | +| `--clip-end F` | Camera far clip plane in meters (default: 250.0) | +| `--preview` | Render a 1920×1080 PNG preview image next to the output `.blend` | +| `--also-obj` | Also produce a `.obj` file alongside the `.blend` | +| `--obj-only` | Only produce `.obj` output, skip Blender | +| `--keep-work-dir` | Retain the intermediate work directory after conversion | +| `--work-dir PATH` | Use a specific work directory instead of a temporary one | + +## Using the GHCR image + +Pre-built images are published to GHCR by CI on every push to the `skp2blend` branch: + +```bash +docker run --rm \ + -v "$(pwd):/data" \ + ghcr.io/recraft-ou/sketchup_importer/skp2blend:latest \ + /data/model.skp /data/model.blend --preview +``` + +### GPU-accelerated rendering + +Pass your GPU to the container for faster EEVEE preview renders. Without a GPU, Blender falls back to software rendering (works but slower). + +**NVIDIA** (requires [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)): + +```bash +docker run --rm --gpus all \ + -v "$(pwd):/data" \ + ghcr.io/recraft-ou/sketchup_importer/skp2blend:latest \ + /data/model.skp /data/model.blend --preview +``` + +**AMD / Intel** (Mesa/RADV — pass the DRI render nodes): + +```bash +docker run --rm --device /dev/dri \ + -v "$(pwd):/data" \ + ghcr.io/recraft-ou/sketchup_importer/skp2blend:latest \ + /data/model.skp /data/model.blend --preview +``` + +## Building the Docker image + +The image requires the SketchUp C SDK DLLs and a compiled `sketchup.pyd`, which are not included in this repository. 
They can be obtained from the [upstream release](https://github.com/RedHaloStudio/Sketchup_Importer/releases/tag/0.27.0): + +```bash +# Download and extract the SDK artifacts +wget -qO /tmp/sdk.zip \ + https://github.com/RedHaloStudio/Sketchup_Importer/releases/download/0.27.0/sketchup_importer-0.27.zip +unzip -q /tmp/sdk.zip -d /tmp/sdk + +# Place them where the Dockerfile expects +mkdir -p sketchup_sdk/binaries/sketchup/x64 +cp /tmp/sdk/sketchup_importer/SketchUpAPI.dll \ + /tmp/sdk/sketchup_importer/SketchUpCommonPreferences.dll \ + sketchup_sdk/binaries/sketchup/x64/ +cp /tmp/sdk/sketchup_importer/sketchup.cp311-win_amd64.pyd sketchup.pyd + +# Build +docker build -t skp2blend . +``` + +A GitHub Actions workflow (`.github/workflows/docker.yml`) automates this and pushes the image to GHCR on every push to the `skp2blend` branch. + +## Output formats + +### .blend + +Full-fidelity Blender scene with: +- Principled BSDF materials with packed textures +- UV mapping +- Group/component hierarchy preserved as Blender parent-child relationships +- Named cameras from SketchUp scenes +- VERTS-based instancing for repeated components +- Negative-scale correction for mirrored components + +### .obj + .mtl + +Flat geometry suitable for interchange and archival: +- All transforms baked into world-space vertex positions +- Z-up to Y-up coordinate conversion (OBJ convention) +- Material colors, opacity, and texture map references in `.mtl` +- Texture files copied to a `textures/` directory alongside the `.obj` +- No hierarchy, cameras, or instancing (OBJ limitation) + +## Exit codes + +| Code | Meaning | +|---|---| +| 0 | Success | +| 1 | Bad input (missing file, bad arguments) | +| 2 | Stage 1 failure (extractor) | +| 3 | Stage 2 or 2b failure (builder) | diff --git a/skp2blend/blend_builder.py b/skp2blend/blend_builder.py new file mode 100644 index 0000000..501287a --- /dev/null +++ b/skp2blend/blend_builder.py @@ -0,0 +1,962 @@ +#!/usr/bin/env python +"""Stage 2 — Build a 
.blend file from the intermediate JSON produced by Stage 1. + +Runs inside Blender headless:: + + blender --background --python blend_builder.py -- \\ + [--max-instance N] [--scene NAME] + +Uses only headless-safe ``bpy`` APIs (no ``bpy.ops.object.add``, no +``bpy.context.screen``, no outliner ops). +""" + +import argparse +import math +import os +import sys +from collections import defaultdict + +import bmesh +import bpy +from mathutils import Matrix, Quaternion, Vector + +# Sibling modules — add our own directory to sys.path so ``intermediate`` +# and ``skputil`` can be imported regardless of how Blender was invoked. +_this_dir = os.path.dirname(os.path.abspath(__file__)) +if _this_dir not in sys.path: + sys.path.insert(0, _this_dir) + +from intermediate import load_intermediate # noqa: E402 +from skputil import ( # noqa: E402 + DEFAULT_MATERIAL_NAME, + EntityType, + group_name, + group_safe_name, + inherent_default_mat, + proxy_dict, +) + + +def skp_log(*args): + if args: + print("SU | " + " ".join(str(a) for a in args)) + + +# --------------------------------------------------------------------------- +# Hidden-tag collection management +# --------------------------------------------------------------------------- + +_hidden_tag_collections = {} # layer_name -> bpy.types.Collection + + +def get_hidden_tag_collection(layer_name): + """Return (or create) a collection for entities on a hidden tag/layer. + + Collections are named ``"Hidden Tag: "`` and linked under + the scene's root collection. They will be excluded from the view layer + after the hierarchy is built (see ``main()``). 
+ """ + if layer_name in _hidden_tag_collections: + return _hidden_tag_collections[layer_name] + + coll_name = f"Hidden Tag: {layer_name}" + coll = bpy.data.collections.new(coll_name) + bpy.context.scene.collection.children.link(coll) + _hidden_tag_collections[layer_name] = coll + return coll + + +# --------------------------------------------------------------------------- +# Materials +# --------------------------------------------------------------------------- + +def write_materials(material_records, work_dir): + """Create Blender materials from intermediate material records. + + Returns ``(materials_dict, materials_scales_dict)``. + """ + materials = {} + materials_scales = {} + + # Default material ------------------------------------------------------- + bmat = bpy.data.materials.new(DEFAULT_MATERIAL_NAME) + bmat.diffuse_color = (0.8, 0.8, 0.8, 0) + if bpy.app.version < (6, 0, 0): + bmat.use_nodes = True + nodes = bmat.node_tree.nodes + links = bmat.node_tree.links + nodes.clear() + output_shader = nodes.new("ShaderNodeOutputMaterial") + output_shader.location = (0, 0) + principled = nodes.new("ShaderNodeBsdfPrincipled") + principled.location = (-300, 0) + links.new(principled.outputs[0], output_shader.inputs["Surface"]) + materials[DEFAULT_MATERIAL_NAME] = bmat + + textures_dir = os.path.join(work_dir, "textures") + + for rec in material_records: + name = rec["name"] + r, g, b, a = rec["color_rgba"] + tex = rec.get("texture") + + if tex: + materials_scales[name] = (tex["s_scale"], tex["t_scale"]) + else: + materials_scales[name] = (1.0, 1.0) + + bmat = bpy.data.materials.new(name) + bmat.diffuse_color = ( + math.pow(r / 255.0, 2.2), + math.pow(g / 255.0, 2.2), + math.pow(b / 255.0, 2.2), + round(a / 255.0, 2), + ) + + if round(a / 255.0, 2) < 1: + bmat.blend_method = "BLEND" + + if bpy.app.version < (6, 0, 0): + bmat.use_nodes = True + + nodes = bmat.node_tree.nodes + links = bmat.node_tree.links + nodes.clear() + output_shader = 
nodes.new("ShaderNodeOutputMaterial") + output_shader.location = (0, 0) + principled = nodes.new("ShaderNodeBsdfPrincipled") + principled.location = (-300, 0) + links.new(principled.outputs[0], output_shader.inputs["Surface"]) + + default_shader = nodes["Principled BSDF"] + default_shader.inputs["Base Color"].default_value = bmat.diffuse_color + default_shader.inputs["Alpha"].default_value = round(a / 255.0, 2) + + if tex: + tex_path = os.path.join(textures_dir, tex["filename"]) + if os.path.isfile(tex_path): + img = bpy.data.images.load(tex_path) + img.pack() + tex_node = nodes.new("ShaderNodeTexImage") + tex_node.image = img + tex_node.location = (-600, 0) + links.new(tex_node.outputs["Color"], default_shader.inputs["Base Color"]) + if img.file_format in ("PNG", "TARGA"): + links.new(tex_node.outputs["Alpha"], default_shader.inputs["Alpha"]) + else: + skp_log(f"Warning: texture file not found: {tex_path}") + + materials[name] = bmat + + return materials, materials_scales + + +# --------------------------------------------------------------------------- +# Mesh building +# --------------------------------------------------------------------------- + +def build_mesh(mesh_data, name, materials): + """Create a Blender mesh from an intermediate MeshData dict. + + Returns ``(mesh, alpha_flag)`` or ``(None, False)`` if *mesh_data* is None. 
+ """ + if mesh_data is None: + return None, False + + verts = mesh_data["vertices"] + tris = mesh_data["triangles"] + uv_list = mesh_data["uvs_per_triangle"] + mat_indices = mesh_data["triangle_material_indices"] + smooth_flags = mesh_data["triangle_smooth"] + face_mats = mesh_data["face_materials"] + + if not verts: + return None, False + + me = bpy.data.meshes.new(name) + alpha = False + uvs_used = False + + # Assign material slots ------------------------------------------------ + for mat_name in face_mats: + bmat = materials.get(mat_name, materials.get(DEFAULT_MATERIAL_NAME)) + me.materials.append(bmat) + try: + if "Image Texture" in bmat.node_tree.nodes.keys(): + uvs_used = True + except AttributeError: + pass + + # Geometry -------------------------------------------------------------- + tri_count = len(tris) + loops_vert_idx = [] + for t in tris: + loops_vert_idx.extend(t) + + loop_start = [] + idx = 0 + for t in tris: + loop_start.append(idx) + idx += len(t) + loop_total = [len(t) for t in tris] + + flat_verts = [] + for v in verts: + flat_verts.extend(v) + + me.vertices.add(len(verts)) + me.vertices.foreach_set("co", flat_verts) + + me.loops.add(len(loops_vert_idx)) + me.loops.foreach_set("vertex_index", loops_vert_idx) + + me.polygons.add(tri_count) + me.polygons.foreach_set("loop_start", loop_start) + me.polygons.foreach_set("loop_total", loop_total) + me.polygons.foreach_set("material_index", mat_indices) + me.polygons.foreach_set("use_smooth", smooth_flags) + + # UVs ------------------------------------------------------------------- + if uvs_used and uv_list: + me.uv_layers.new() + k = 0 + for i in range(tri_count): + for j in range(3): + uv_off = j * 2 + me.uv_layers[0].data[k].uv = (uv_list[i][uv_off], uv_list[i][uv_off + 1]) + k += 1 + + me.update(calc_edges=True) + me.validate() + return me, alpha + + +# --------------------------------------------------------------------------- +# Component analysis (on intermediate tree) +# 
--------------------------------------------------------------------------- + +def _inherent_mat(node_mat, parent_default): + return inherent_default_mat(node_mat, parent_default) + + +def analyze_entities(node, parent_transform, default_material, etype, component_stats, component_skip): + """Walk the entity tree and count component instances (mirrors SceneImporter.analyze_entities).""" + if etype == EntityType.component: + name = node.get("definition_name", node["name"]) + component_stats[(name, default_material)].append(parent_transform) + + for child in node.get("children", []): + child_type = child["type"] + if child.get("hidden"): + continue + child_mat = _inherent_mat(child.get("material_name"), default_material) + child_transform = parent_transform + if child.get("transform"): + child_transform = (Matrix(parent_transform) @ Matrix(child["transform"])).to_4x4() + child_transform = [list(row) for row in child_transform] + + if child_type == "group": + analyze_entities(child, child_transform, child_mat, EntityType.group, + component_stats, component_skip) + elif child_type == "component_instance": + cname = child.get("definition_name", child["name"]) + if (cname, child_mat) in component_skip: + continue + analyze_entities(child, child_transform, child_mat, EntityType.component, + component_stats, component_skip) + + return component_stats + + +# --------------------------------------------------------------------------- +# Deduplicated groups (ports write_duplicateable_groups) +# --------------------------------------------------------------------------- + +def write_duplicateable_groups( + entity_tree, + comp_depth_map, + max_instance, + materials, + component_skip, + group_written, + component_meshes, +): + """Create Blender collections for high-frequency components.""" + component_stats = analyze_entities( + entity_tree, + [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + DEFAULT_MATERIAL_NAME, + EntityType.none, + defaultdict(list), + 
component_skip, + ) + component_stats = {k: v for k, v in component_stats.items() if len(v) >= max_instance} + + max_depth = max(comp_depth_map.values(), default=0) + + for i in range(max_depth + 1): + for k, v in component_stats.items(): + name, mat = k + depth = comp_depth_map.get(name, 0) + if depth == 1: + pass + elif depth == i: + gname = group_name(name, mat) + if gname in bpy.data.collections: + skp_log(f"Group {gname} already defined") + component_skip[(name, mat)] = True + group_written[(name, mat)] = bpy.data.collections[gname] + else: + group = bpy.data.collections.new(name=gname) + skp_log(f"Component {gname} written as group") + _build_group_from_tree( + entity_tree, name, mat, group, + materials, component_skip, group_written, component_meshes, + ) + component_skip[(name, mat)] = True + group_written[(name, mat)] = group + + +def _find_definition_node(tree, def_name): + """Find the first node in tree whose definition_name matches.""" + if tree.get("definition_name") == def_name: + return tree + for child in tree.get("children", []): + result = _find_definition_node(child, def_name) + if result is not None: + return result + return None + + +def _build_group_from_tree( + entity_tree, comp_name, default_material, group, + materials, component_skip, group_written, component_meshes, +): + """Build collection objects for a component definition (ports component_def_as_group).""" + node = _find_definition_node(entity_tree, comp_name) + if node is None: + return + + mesh_data = node.get("mesh") + mesh_key = (comp_name, default_material) + + if mesh_key in component_meshes: + me, alpha = component_meshes[mesh_key] + else: + me, alpha = build_mesh(mesh_data, comp_name, materials) + component_meshes[mesh_key] = (me, alpha) + + if me: + ob = bpy.data.objects.new(comp_name, me) + ob.matrix_world = Matrix.Identity(4) + me.update(calc_edges=True) + bpy.context.collection.objects.link(ob) + group.objects.link(ob) + + for child in node.get("children", []): + if 
child.get("hidden"): + continue + child_type = child["type"] + child_mat = _inherent_mat(child.get("material_name"), default_material) + child_name = child["name"] + + if child_type == "group": + child_mesh = child.get("mesh") + ckey = (child_name, child_mat) + if ckey in component_meshes: + cme, calpha = component_meshes[ckey] + else: + cme, calpha = build_mesh(child_mesh, child_name, materials) + component_meshes[ckey] = (cme, calpha) + if cme: + cob = bpy.data.objects.new(child_name, cme) + if child.get("transform"): + cob.matrix_world = Matrix(child["transform"]) + cme.update(calc_edges=True) + bpy.context.collection.objects.link(cob) + group.objects.link(cob) + + elif child_type == "component_instance": + cdef_name = child.get("definition_name", child_name) + if (cdef_name, child_mat) in component_skip: + ob = _instance_object_or_group(cdef_name, child_mat, group_written, component_meshes) + if child.get("transform"): + ob.matrix_world = Matrix(child["transform"]) + bpy.context.collection.objects.link(ob) + group.objects.link(ob) + else: + child_mesh = child.get("mesh") + ckey = (child_name, child_mat) + if ckey in component_meshes: + cme, calpha = component_meshes[ckey] + else: + cme, calpha = build_mesh(child_mesh, child_name, materials) + component_meshes[ckey] = (cme, calpha) + if cme: + cob = bpy.data.objects.new(child_name, cme) + if child.get("transform"): + cob.matrix_world = Matrix(child["transform"]) + cme.update(calc_edges=True) + bpy.context.collection.objects.link(cob) + group.objects.link(cob) + + +def _instance_object_or_group(name, default_material, group_written, component_meshes): + """Return an object that instances a group or directly references a mesh.""" + if (name, default_material) in group_written: + grp = group_written[(name, default_material)] + ob = bpy.data.objects.new(name=name, object_data=None) + ob.instance_type = "COLLECTION" + ob.instance_collection = grp + ob.empty_display_size = 0.01 + return ob + me, alpha = 
component_meshes.get((name, default_material), (None, False)) + if me is not None: + ob = bpy.data.objects.new(name, me) + if alpha: + ob.show_transparent = True + me.update(calc_edges=True) + return ob + # Fallback — empty + return bpy.data.objects.new(name, None) + + +# --------------------------------------------------------------------------- +# Entity hierarchy (ports write_entities) +# --------------------------------------------------------------------------- + +def _node_has_geometry(node, layers_skip): + """Check whether a node or any of its descendants contain mesh data.""" + mesh = node.get("mesh") + if mesh and mesh.get("vertices"): + return True + for child in node.get("children", []): + if child.get("hidden"): + continue + child_layer = child.get("layer_name") + if layers_skip and child_layer in layers_skip: + continue + if _node_has_geometry(child, layers_skip): + return True + return False + + +def _count_visible_children(node, layers_skip): + """Count visible (non-hidden, non-skipped) children of a node.""" + count = 0 + for child in node.get("children", []): + if child.get("hidden"): + continue + child_layer = child.get("layer_name") + if layers_skip and child_layer in layers_skip: + count += 1 # still counts — will go into hidden-tag collection + continue + count += 1 + return count + + +def write_entities( + node, + parent_transform, + default_material, + etype, + parent_obj, + parent_location, + materials, + component_skip, + component_stats, + group_written, + component_meshes, + layers_skip, + target_collection=None, + depth=0, +): + """Recursively build Blender objects from the entity tree. + + *target_collection* overrides ``bpy.context.collection`` for linking + objects. Used to place hidden-tag entities into their own collection. 
+ """ + coll = target_collection or bpy.context.collection + name = node["name"] + + # Deduplicated component — record transform only + if etype == EntityType.component: + def_name = node.get("definition_name", name) + if (def_name, default_material) in component_skip: + component_stats[(def_name, default_material)].append( + [list(row) for row in Matrix(parent_transform)] + ) + return + + # Build mesh + mesh_key = (name, default_material) + if mesh_key in component_meshes: + me, alpha = component_meshes[mesh_key] + else: + me, alpha = build_mesh(node.get("mesh"), name, materials) + component_meshes[mesh_key] = (me, alpha) + + visible_children = _count_visible_children(node, layers_skip) + + # Skip empty groups that have no geometry anywhere in their subtree + if not me and visible_children == 0: + return + if not me and etype == EntityType.group and not _node_has_geometry(node, layers_skip): + return + + # Create a sub-collection for top-level groups to spread objects across + # multiple collections and reduce depsgraph churn. 
+ sub_collection = None + if depth == 1 and visible_children > 0 and name != "_(Loose Entity)": + sub_collection = bpy.data.collections.new(name) + coll.children.link(sub_collection) + + link_coll = sub_collection or coll + + hide_empty = False + + if visible_children == 0 or name == "_(Loose Entity)": + ob = bpy.data.objects.new(name, me) + ob.matrix_world = Matrix(parent_transform) + if me: + me.update(calc_edges=True) + else: + ob = bpy.data.objects.new(name, None) + ob.matrix_world = Matrix(parent_transform) + hide_empty = True + if me: + ob_mesh = bpy.data.objects.new("_" + name + " (Loose Mesh)", me) + ob_mesh.matrix_world = Matrix(parent_transform) + me.update(calc_edges=True) + ob_mesh.parent = ob + ob_mesh.location = Vector((0, 0, 0)) + link_coll.objects.link(ob_mesh) + + loc = ob.location + nested_location = Vector((loc[0], loc[1], loc[2])) + + if parent_obj is not None and parent_obj.name != "_(Loose Entity)": + ob.parent = parent_obj + ob.location -= parent_location + + if visible_children > 0: + ob.rotation_mode = "QUATERNION" + ob.rotation_quaternion = Vector((1, 0, 0, 0)) + ob.scale = Vector((1, 1, 1)) + + link_coll.objects.link(ob) + ob.hide_set(hide_empty) + + for child in node.get("children", []): + if child.get("hidden"): + continue + + child_type = child["type"] + child_mat = _inherent_mat(child.get("material_name"), default_material) + + # If the child is on a skipped layer, redirect it (and its subtree) + # into a per-tag hidden collection instead of skipping it entirely. 
+ child_coll = sub_collection or target_collection + child_layer = child.get("layer_name") + if layers_skip and child_layer in layers_skip: + child_coll = get_hidden_tag_collection(child_layer) + + child_transform = parent_transform + if child.get("transform"): + child_transform = Matrix(parent_transform) @ Matrix(child["transform"]) + child_transform = [list(row) for row in child_transform] + + if child_type == "group": + # Generate safe name the same way the original does + temp_name = child["name"] + gname = "G-" + group_safe_name(temp_name) + child_copy = dict(child) + child_copy["name"] = gname + write_entities( + child_copy, child_transform, child_mat, EntityType.group, + ob, nested_location, + materials, component_skip, component_stats, + group_written, component_meshes, layers_skip, + target_collection=child_coll, + depth=depth + 1, + ) + elif child_type == "component_instance": + write_entities( + child, child_transform, child_mat, EntityType.component, + ob, nested_location, + materials, component_skip, component_stats, + group_written, component_meshes, layers_skip, + target_collection=child_coll, + depth=depth + 1, + ) + + +# --------------------------------------------------------------------------- +# Instancing (ports instance_group_dupli_vert) +# --------------------------------------------------------------------------- + +def instance_group_dupli_vert(name, default_material, component_stats, group_written, component_meshes): + """Create VERTS-based instancing for deduplicated components.""" + + def get_orientations(transforms): + orientations = defaultdict(list) + for t in transforms: + loc, rot, scale = Matrix(t).decompose() + s = (scale[0], scale[1], scale[2]) + r = (rot[0], rot[1], rot[2], rot[3]) + orientations[(s, r)].append((loc[0], loc[1], loc[2])) + for key, locs in orientations.items(): + s, r = key + yield s, r, locs + + for scale, rot, locs in get_orientations(component_stats[(name, default_material)]): + verts = [] + main_loc = 
Vector(locs[0])
    # NOTE(review): the lines above/below belong to a function whose `def`
    # is outside this view; `main_loc` is presumably assigned from locs[0].
    for c in locs:
        verts.append(Vector(c) - main_loc)

    # Flatten [Vector, ...] into [x0, y0, z0, x1, ...] for foreach_set.
    flat_verts = []
    for v in verts:
        flat_verts.extend(v)

    # One vertex per instance location — the dupli-vert carrier mesh.
    dme = bpy.data.meshes.new("DUPLI-" + name)
    dme.vertices.add(len(verts))
    dme.vertices.foreach_set("co", flat_verts)
    dme.update(calc_edges=True)
    dme.validate()

    dob = bpy.data.objects.new("DUPLI-" + name, dme)
    dob.location = main_loc
    dob.instance_type = "VERTS"

    # The instanced child: each carrier vertex spawns one copy of it.
    ob = _instance_object_or_group(name, default_material, group_written, component_meshes)
    ob.scale = scale
    ob.rotation_mode = "QUATERNION"
    ob.rotation_quaternion = Quaternion((rot[0], rot[1], rot[2], rot[3]))
    ob.parent = dob

    bpy.context.collection.objects.link(ob)
    bpy.context.collection.objects.link(dob)
    skp_log(f"Complex group {name} {default_material} instanced {len(verts)} times")


# ---------------------------------------------------------------------------
# Camera creation (ports write_camera — headless-safe, no bpy.ops.object.add)
# ---------------------------------------------------------------------------

def write_camera(cam_record, name="Last View", aspect_ratio_fallback=16 / 9, clip_end=250.0):
    """Create a Blender camera from an intermediate CameraRecord.

    Parameters
    ----------
    cam_record : dict
        Record produced by ``make_camera_record`` — keys ``position``,
        ``target``, ``up`` (3-lists), ``fov`` (degrees or ``None`` for
        orthographic) and ``aspect_ratio`` (``None`` → use the fallback).
    name : str
        Suffix for the "Cam: " data-block / object names.
    aspect_ratio_fallback : float
        Used when the record carries no aspect ratio.
    clip_end : float
        Far clip distance assigned to the camera data.

    Returns
    -------
    (bpy.types.Object, bpy.types.Camera)
        The linked camera object and its camera data-block.
    """
    pos = Vector(cam_record["position"])
    target = Vector(cam_record["target"])
    up = Vector(cam_record["up"])
    fov = cam_record["fov"]
    aspect_ratio = cam_record["aspect_ratio"]

    cam_data = bpy.data.cameras.new("Cam: " + name)
    ob = bpy.data.objects.new("Cam: " + name, cam_data)

    ob.location = pos

    # Build an orthonormal look-at basis: camera -Z looks from pos toward
    # target, so local +Z is (pos - target).
    z = pos - target
    x = up.cross(z)
    y = z.cross(x)
    x.normalize()
    y.normalize()
    z.normalize()

    # Write the basis directly into matrix_world (headless-safe; avoids
    # bpy.ops-based camera alignment).
    ob.matrix_world.col[0] = x.resized(4)
    ob.matrix_world.col[1] = y.resized(4)
    ob.matrix_world.col[2] = z.resized(4)
    ob.matrix_world.col[3] = Vector((pos[0], pos[1], pos[2], 1.0))

    if aspect_ratio is None:
        aspect_ratio = aspect_ratio_fallback

    if fov is None:
        # No FOV recorded — treat as an orthographic camera.
        cam_data.type = "ORTHO"
    else:
        # NOTE(review): scaling the angle by the aspect ratio approximates a
        # vertical→horizontal FOV conversion (exact form would use atan/tan)
        # — confirm against the SketchUp camera FOV convention.
        cam_data.angle = (math.pi * fov / 180) * aspect_ratio

    cam_data.clip_end = clip_end

    bpy.context.collection.objects.link(ob)
    return ob, cam_data


# ---------------------------------------------------------------------------
# Post-processing: fix negative-determinant transforms
# ---------------------------------------------------------------------------

# A 4x4 matrix that negates the Z column — multiplying on the right flips
# the determinant sign without changing location or the other two axes.
_FLIP_Z = Matrix((
    (1, 0, 0, 0),
    (0, 1, 0, 0),
    (0, 0, -1, 0),
    (0, 0, 0, 1),
))


def _mirror_mesh_z(me):
    """Negate Z of all vertices and reverse face winding.

    This "bakes" a Z-axis reflection into the mesh data so that the
    corresponding matrix correction (``@ _FLIP_Z``) produces the same
    world-space positions and correct outward-facing normals with a
    positive-determinant transform.

    Parameters
    ----------
    me : bpy.types.Mesh
        Mesh data-block, modified in place.
    """
    bm = bmesh.new()
    bm.from_mesh(me)
    # Negate Z of every vertex
    for v in bm.verts:
        v.co.z = -v.co.z
    # Reverse face winding to keep normals outward after the reflection
    bmesh.ops.reverse_faces(bm, faces=bm.faces)
    bm.to_mesh(me)
    bm.free()
    me.update()


def fix_negative_scales():
    """Fix objects whose world matrix has a negative determinant.

    Mirrored SketchUp components produce transforms with det < 0. Blender
    handles these by flipping normals at render time, which is slow and can
    cause viewport flickering with many objects.

    For each affected mesh object we flip the mesh normals (reversing face
    winding) and correct the *local* matrix so the final world determinant
    is positive. Shared meshes are duplicated where necessary so
    non-mirrored users are unaffected.

    We must modify ``matrix_local`` (not ``matrix_world``) because
    ``matrix_world`` is recomputed from the parent chain and our changes
    would be lost.
    """
    # Force a depsgraph update so matrix_world values are current.
    bpy.context.view_layer.update()

    # Group mesh objects by their mesh data-block
    mesh_users = defaultdict(list)  # mesh name -> [(ob, needs_flip)]
    for ob in bpy.data.objects:
        if ob.type != 'MESH' or ob.data is None:
            continue
        needs_flip = ob.matrix_world.determinant() < 0
        mesh_users[ob.data.name].append((ob, needs_flip))

    flipped_count = 0
    copied_count = 0

    for me_name, users in mesh_users.items():
        neg_users = [(ob, nf) for ob, nf in users if nf]
        pos_users = [(ob, nf) for ob, nf in users if not nf]

        if not neg_users:
            continue  # all positive — nothing to do

        if not pos_users:
            # All users are mirrored — flip normals in-place and fix
            # every user's local matrix.
            me = neg_users[0][0].data
            _mirror_mesh_z(me)
            for ob, _ in neg_users:
                ob.matrix_local = ob.matrix_local @ _FLIP_Z
                flipped_count += 1
        else:
            # Mixed: some users are mirrored, others aren't. Duplicate
            # the mesh for the mirrored users and flip normals on the copy.
            me_orig = neg_users[0][0].data
            me_copy = me_orig.copy()
            me_copy.name = me_orig.name + ".mirror"
            _mirror_mesh_z(me_copy)
            for ob, _ in neg_users:
                ob.data = me_copy
                ob.matrix_local = ob.matrix_local @ _FLIP_Z
                flipped_count += 1
            copied_count += 1

    # Update depsgraph so world matrices reflect our local changes
    bpy.context.view_layer.update()

    skp_log(f"Fixed {flipped_count} negative-scale object(s) ({copied_count} mesh copies)")


def remove_degenerate_faces():
    """Remove zero-area faces that can cause shading artifacts.

    Iterates every mesh data-block, deleting faces whose computed area is
    below 1e-8 (square meters, given the importer's meter-scaled vertices).
    Logs a summary only when something was removed.
    """
    removed_total = 0
    for me in bpy.data.meshes:
        bm = bmesh.new()
        bm.from_mesh(me)
        degenerate = [f for f in bm.faces if f.calc_area() < 1e-8]
        if degenerate:
            bmesh.ops.delete(bm, geom=degenerate, context='FACES')
            removed_total += len(degenerate)
            bm.to_mesh(me)
            me.update()
        bm.free()
    if removed_total:
        skp_log(f"Removed {removed_total} zero-area face(s)")


# ---------------------------------------------------------------------------
#
# Main
# ---------------------------------------------------------------------------

def main():
    """Entry point: build a .blend from intermediate JSON (Stage 2).

    Invoked by Blender as ``blender --background --python blend_builder.py
    -- <work_dir> <output_blend> [flags]``; arguments after the ``--``
    separator are parsed here.
    """
    # Parse args after the Blender ``--`` separator
    argv = sys.argv
    if "--" in argv:
        argv = argv[argv.index("--") + 1:]
    else:
        argv = []

    parser = argparse.ArgumentParser(description="Build a .blend from intermediate JSON")
    parser.add_argument("work_dir", help="Directory containing intermediate.json and textures/")
    parser.add_argument("output_blend", help="Output .blend file path")
    parser.add_argument("--max-instance", type=int, default=1, help="Instancing threshold")
    parser.add_argument("--scene", type=str, default="", help="Import a specific named scene")
    parser.add_argument("--clip-end", type=float, default=250.0, help="Camera far clip plane")
    args = parser.parse_args(argv)

    skp_log(f"Loading intermediate data from {args.work_dir}")
    data = load_intermediate(args.work_dir)

    # Determine hidden layers if a specific scene is requested
    layers_skip = set()
    selected_scene = None
    if args.scene:
        for sc in data.get("scenes", []):
            if sc["name"] == args.scene:
                selected_scene = sc
                layers_skip = set(sc.get("hidden_layer_names", []))
                skp_log(f"Importing scene '{args.scene}', hiding {len(layers_skip)} layer(s)")
                break

    # Set render engine — use EEVEE for fast viewport display.
    # EEVEE avoids the progressive-render flickering that Cycles causes when
    # opening files with many objects. The engine ID changed in Blender 5.0.
    # NOTE(review): assumes Blender >= 4.2 where "BLENDER_EEVEE_NEXT" exists
    # — confirm the minimum supported Blender version.
    if bpy.app.version >= (5, 0, 0):
        bpy.context.scene.render.engine = "BLENDER_EEVEE"
    else:
        bpy.context.scene.render.engine = "BLENDER_EEVEE_NEXT"

    # Remove default objects (Cube, Camera, Light) that Blender creates
    for obj_name in ("Cube", "Camera", "Light"):
        ob = bpy.data.objects.get(obj_name)
        if ob is not None:
            bpy.data.objects.remove(ob, do_unlink=True)

    # --- Materials ---
    skp_log("Creating materials...")
    materials, materials_scales = write_materials(data["materials"], args.work_dir)
    skp_log(f" {len(materials)} material(s)")

    # --- Component depths ---
    comp_depth_map = {}
    for cd in data.get("component_definitions", []):
        comp_depth_map[cd["name"]] = cd["depth"]

    entity_tree = data["entity_tree"]

    # --- Cameras ---
    skp_log("Creating cameras...")
    ren = bpy.context.scene.render
    aspect_fallback = ren.resolution_x / ren.resolution_y

    # Named scenes as cameras
    for sc in data.get("scenes", []):
        write_camera(sc["camera"], sc["name"], aspect_ratio_fallback=aspect_fallback, clip_end=args.clip_end)

    # Model camera
    if data.get("cameras"):
        cam_ob, cam_data = write_camera(
            data["cameras"][0], "Last View",
            aspect_ratio_fallback=aspect_fallback,
            clip_end=args.clip_end,
        )
        if selected_scene:
            # If importing a specific scene, use that scene's camera
            # (overrides the "Last View" camera as the active one).
            for sc in data.get("scenes", []):
                if sc["name"] == args.scene:
                    cam_ob, cam_data = write_camera(
                        sc["camera"], sc["name"],
                        aspect_ratio_fallback=aspect_fallback,
                        clip_end=args.clip_end,
                    )
                    break
        bpy.context.scene.camera = cam_ob

    # --- Deduplicated groups ---
    skp_log("Writing deduplicated groups...")
    component_skip = proxy_dict()
    group_written = {}
    component_meshes = {}
    component_stats = defaultdict(list)

    write_duplicateable_groups(
        entity_tree, comp_depth_map, args.max_instance,
        materials, component_skip, group_written, component_meshes,
    )

    # Hide the component collections
    for gname, coll in group_written.items():
        coll.hide_viewport = True

    # --- Entity hierarchy ---
    skp_log("Building entity hierarchy...")
    write_entities(
        entity_tree,
        # Identity 4x4 as the root transform.
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
        DEFAULT_MATERIAL_NAME,
        EntityType.none,
        None,
        Vector((0, 0, 0)),
        materials,
        component_skip,
        component_stats,
        group_written,
        component_meshes,
        layers_skip,
    )

    # --- Instancing ---
    skp_log("Creating instances...")
    for k in component_stats:
        name, mat = k
        instance_group_dupli_vert(name, mat, component_stats, group_written, component_meshes)

    # --- Exclude hidden-tag collections from view layer ---
    if _hidden_tag_collections:
        vl_root = bpy.context.view_layer.layer_collection
        for child_lc in vl_root.children:
            if child_lc.name.startswith("Hidden Tag: "):
                child_lc.exclude = True
        skp_log(f"Excluded {len(_hidden_tag_collections)} hidden-tag collection(s) from view layer")

    # --- Post-processing ---
    skp_log("Post-processing...")
    fix_negative_scales()
    remove_degenerate_faces()

    # Purge orphan data blocks (unused materials/images) to reduce file size
    bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=False, do_recursive=True)
    skp_log("Purged orphan data blocks")

    # Force all 3D viewports to SOLID shading to prevent EEVEE shader
    # compilation from freezing the GUI on first open.
    for screen in bpy.data.screens:
        for area in screen.areas:
            if area.type == "VIEW_3D":
                for space in area.spaces:
                    if space.type == "VIEW_3D":
                        space.shading.type = "SOLID"
                        space.shading.color_type = "MATERIAL"

    # --- Save ---
    skp_log(f"Saving {args.output_blend}")
    bpy.ops.wm.save_as_mainfile(filepath=args.output_blend)
    skp_log("Done.")


if __name__ == "__main__":
    main()
diff --git a/skp2blend/cli.py b/skp2blend/cli.py
new file mode 100644
index 0000000..29c9b63
--- /dev/null
+++ b/skp2blend/cli.py
@@ -0,0 +1,219 @@
#!/usr/bin/env python3
"""CLI orchestrator for the two-stage SKP-to-Blend converter.
+ +Usage:: + + python cli.py input.skp output.blend [--max-instance N] [--scene NAME] \\ + [--keep-work-dir] [--clip-end F] + +Stage 1 runs ``skp_extractor.py`` under Wine (Windows Python + SketchUp SDK). +Stage 2 runs ``blend_builder.py`` inside Blender headless. + +Exit codes: + 0 success + 1 bad input (missing file, bad arguments) + 2 Stage 1 failure (extractor) + 3 Stage 2 failure (builder) +""" + +import argparse +import os +import platform +import shutil +import subprocess +import sys +import tempfile + +_THIS_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def _to_wine_path(posix_path): + """Convert a POSIX absolute path to a Wine Z:-drive path.""" + return "Z:" + posix_path.replace("/", "\\") + + +def main(): + parser = argparse.ArgumentParser( + description="Convert a SketchUp .skp file to a Blender .blend file", + ) + parser.add_argument("input_skp", help="Path to the input .skp file") + parser.add_argument("output_blend", help="Path for the output .blend file") + parser.add_argument("--max-instance", type=int, default=1, help="Instancing threshold (default: 1)") + parser.add_argument("--scene", type=str, default="", help="Import a specific named SketchUp scene") + parser.add_argument("--clip-end", type=float, default=250.0, help="Camera far clip plane in meters") + parser.add_argument("--keep-work-dir", action="store_true", help="Don't delete the intermediate work directory") + parser.add_argument("--work-dir", type=str, default="", help="Use a specific work directory instead of a temp one") + parser.add_argument("--preview", action="store_true", help="Render a PNG preview image next to the output .blend") + parser.add_argument("--also-obj", action="store_true", help="Also produce a Wavefront OBJ alongside the .blend") + parser.add_argument("--obj-only", action="store_true", help="Only produce OBJ output (skip Blender Stage 2)") + parser.add_argument( + "--wine-python", + type=str, + default=r"C:\Python311\python.exe", + help="Wine path to Windows 
Python executable (default: C:\\Python311\\python.exe)", + ) + parser.add_argument( + "--blender", + type=str, + default="blender", + help="Path to the Blender executable (default: blender)", + ) + args = parser.parse_args() + + # --- Validate input --- + input_skp = os.path.abspath(args.input_skp) + output_blend = os.path.abspath(args.output_blend) + + if not os.path.isfile(input_skp): + print(f"Error: input file not found: {input_skp}", file=sys.stderr) + sys.exit(1) + + # --- Work directory --- + if args.work_dir: + work_dir = os.path.abspath(args.work_dir) + os.makedirs(work_dir, exist_ok=True) + cleanup = False + else: + work_dir = tempfile.mkdtemp(prefix="skp2blend_") + cleanup = not args.keep_work_dir + + print(f"Work directory: {work_dir}") + + extractor_script = os.path.join(_THIS_DIR, "skp_extractor.py") + builder_script = os.path.join(_THIS_DIR, "blend_builder.py") + + try: + # ============================================================= + # Stage 1 — Extract .skp data (runs under Wine on Linux) + # ============================================================= + print("\n=== Stage 1: Extracting SKP data ===") + + is_linux = platform.system() == "Linux" + + if is_linux: + win_input = _to_wine_path(input_skp) + win_work = _to_wine_path(work_dir) + win_script = _to_wine_path(extractor_script) + stage1_cmd = [ + "xvfb-run", "-a", + "wine", args.wine_python, + win_script, win_input, win_work, + ] + else: + # On macOS/Windows we can run the extractor natively + stage1_cmd = [ + sys.executable, + extractor_script, input_skp, work_dir, + ] + + print(f"Running: {' '.join(stage1_cmd)}") + result = subprocess.run(stage1_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + # Filter out known harmless noise from Wine/libtiff/xvfb + _noise = {"not a tiff or mdi file", "x connection to", "broken (explicit kill"} + for line in result.stdout.decode(errors="replace").splitlines(): + if any(pat in line.lower() for pat in _noise): + continue + print(line) + + if 
result.returncode != 0: + print(f"\nError: Stage 1 (extractor) failed with exit code {result.returncode}", file=sys.stderr) + sys.exit(2) + + intermediate_path = os.path.join(work_dir, "intermediate.json") + if not os.path.isfile(intermediate_path): + print(f"\nError: Stage 1 did not produce {intermediate_path}", file=sys.stderr) + sys.exit(2) + + print(f"Stage 1 complete — intermediate.json ({os.path.getsize(intermediate_path)} bytes)") + + # ============================================================= + # Stage 2 — Build .blend (runs inside Blender headless) + # ============================================================= + if not args.obj_only: + print("\n=== Stage 2: Building .blend file ===") + + stage2_cmd = [ + args.blender, "--background", "--python", builder_script, + "--", + work_dir, output_blend, + "--max-instance", str(args.max_instance), + "--clip-end", str(args.clip_end), + ] + if args.scene: + stage2_cmd.extend(["--scene", args.scene]) + + print(f"Running: {' '.join(stage2_cmd)}") + result = subprocess.run(stage2_cmd) + + if result.returncode != 0: + print(f"\nError: Stage 2 (builder) failed with exit code {result.returncode}", file=sys.stderr) + sys.exit(3) + + if not os.path.isfile(output_blend): + print(f"\nError: Stage 2 did not produce {output_blend}", file=sys.stderr) + sys.exit(3) + + print(f"\nSuccess: {output_blend} ({os.path.getsize(output_blend)} bytes)") + + # ============================================================= + # Preview render + # ============================================================= + if args.preview and not args.obj_only: + preview_path = os.path.splitext(output_blend)[0] + ".png" + print(f"\n=== Rendering preview to {preview_path} ===") + + render_script = os.path.join(_THIS_DIR, "render_preview.py") + preview_cmd = [ + args.blender, "--background", output_blend, + "--python", render_script, + "--", preview_path, + ] + + print(f"Running: {' '.join(preview_cmd)}") + result = subprocess.run(preview_cmd) + + if 
result.returncode != 0: + print("Warning: preview render failed", file=sys.stderr) + elif os.path.isfile(preview_path): + print(f"Preview: {preview_path} ({os.path.getsize(preview_path)} bytes)") + + # ============================================================= + # Stage 2b — Build OBJ (pure Python, no Blender needed) + # ============================================================= + if args.also_obj or args.obj_only: + obj_output = os.path.splitext(output_blend)[0] + ".obj" + obj_builder_script = os.path.join(_THIS_DIR, "obj_builder.py") + + print("\n=== Stage 2b: Building OBJ file ===") + + stage2b_cmd = [ + sys.executable, obj_builder_script, + work_dir, obj_output, + ] + if args.scene: + stage2b_cmd.extend(["--scene", args.scene]) + + print(f"Running: {' '.join(stage2b_cmd)}") + result = subprocess.run(stage2b_cmd) + + if result.returncode != 0: + print(f"\nError: Stage 2b (OBJ builder) failed with exit code {result.returncode}", file=sys.stderr) + sys.exit(3) + + if not os.path.isfile(obj_output): + print(f"\nError: Stage 2b did not produce {obj_output}", file=sys.stderr) + sys.exit(3) + + print(f"\nSuccess: {obj_output} ({os.path.getsize(obj_output)} bytes)") + + finally: + if cleanup: + print(f"Cleaning up {work_dir}") + shutil.rmtree(work_dir, ignore_errors=True) + elif args.keep_work_dir or args.work_dir: + print(f"Work directory retained at {work_dir}") + + +if __name__ == "__main__": + main() diff --git a/skp2blend/convert.sh b/skp2blend/convert.sh new file mode 100755 index 0000000..3b7edb1 --- /dev/null +++ b/skp2blend/convert.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Convert a .skp file to .blend using the skp2blend Docker image. +# +# Usage: ./convert.sh [output.blend] +# +# If output is omitted, the .blend file is written next to the input +# with the same base name. + +set -euo pipefail + +if [ $# -lt 1 ]; then + echo "Usage: $0 [output.blend] [flags...]" >&2 + echo "Flags are passed through to skp2blend (e.g. 
--preview, --scene NAME, --also-obj)" >&2
    exit 1
fi

INPUT="$(realpath "$1")"
INPUT_DIR="$(dirname "$INPUT")"
INPUT_NAME="$(basename "$INPUT")"

if [ ! -f "$INPUT" ]; then
    echo "Error: file not found: $INPUT" >&2
    exit 1
fi

# Parse positional and flag arguments
OUTPUT=""
EXTRA_FLAGS=()
shift
while [ $# -gt 0 ]; do
    case "$1" in
        --*)
            EXTRA_FLAGS+=("$1")
            # Consume the next arg too if this flag takes a value
            case "$1" in
                --scene|--max-instance|--clip-end|--work-dir)
                    shift
                    EXTRA_FLAGS+=("$1")
                    ;;
            esac
            ;;
        *)
            OUTPUT="$1"
            ;;
    esac
    shift
done

# Default output: same directory/basename as the input, .blend extension.
if [ -z "$OUTPUT" ]; then
    OUTPUT="${INPUT_DIR}/${INPUT_NAME%.skp}.blend"
else
    OUTPUT="$(realpath -m "$OUTPUT")"
fi
OUTPUT_DIR="$(dirname "$OUTPUT")"
OUTPUT_NAME="$(basename "$OUTPUT")"

# If input and output are in the same directory we only need one mount
if [ "$INPUT_DIR" = "$OUTPUT_DIR" ]; then
    docker run --rm \
        -v "${INPUT_DIR}:/data" \
        skp2blend \
        "/data/${INPUT_NAME}" "/data/${OUTPUT_NAME}" "${EXTRA_FLAGS[@]+${EXTRA_FLAGS[@]}}"
else
    docker run --rm \
        -v "${INPUT_DIR}:/input:ro" \
        -v "${OUTPUT_DIR}:/output" \
        skp2blend \
        "/input/${INPUT_NAME}" "/output/${OUTPUT_NAME}" "${EXTRA_FLAGS[@]+${EXTRA_FLAGS[@]}}"
fi

echo "Output: ${OUTPUT}"
diff --git a/skp2blend/diagnose_blend.py b/skp2blend/diagnose_blend.py
new file mode 100644
index 0000000..3ff6b93
--- /dev/null
+++ b/skp2blend/diagnose_blend.py
@@ -0,0 +1,167 @@
# Ad-hoc diagnostics script: opens a .blend and prints structural statistics.
# NOTE(review): the .blend path is hard-coded; `import sys` is unused here.
import bpy
import sys

bpy.ops.wm.open_mainfile(filepath="/data/kornlada.blend")

print("=" * 70)
print("BLEND FILE DIAGNOSTICS")
print("=" * 70)

# Basic counts
print(f"\nObjects: {len(bpy.data.objects)}")
print(f"Meshes: {len(bpy.data.meshes)}")
print(f"Materials: {len(bpy.data.materials)}")
print(f"Images: {len(bpy.data.images)}")
print(f"Collections: {len(bpy.data.collections)}")
print(f"Cameras: {len(bpy.data.cameras)}")

# Object type breakdown
from collections import Counter
type_counts = Counter(ob.type for ob in bpy.data.objects)
print(f"\nObject types: {dict(type_counts)}")

# Instance types
instance_counts = Counter(ob.instance_type for ob in bpy.data.objects if ob.instance_type != 'NONE')
if instance_counts:
    print(f"Instance types: {dict(instance_counts)}")

# Check for objects with VERTS instancing (dupli-verts)
dupli_verts = [ob for ob in bpy.data.objects if ob.instance_type == 'VERTS']
if dupli_verts:
    print(f"\nDUPLI-VERT objects ({len(dupli_verts)}):")
    for ob in dupli_verts:
        child_count = len([c for c in bpy.data.objects if c.parent == ob])
        vert_count = len(ob.data.vertices) if ob.data else 0
        print(f" {ob.name}: {vert_count} verts (instances) x {child_count} child(ren)")

# Check for COLLECTION instancing
collection_instances = [ob for ob in bpy.data.objects if ob.instance_type == 'COLLECTION']
if collection_instances:
    print(f"\nCOLLECTION instance objects ({len(collection_instances)}):")
    for ob in collection_instances:
        coll = ob.instance_collection
        coll_name = coll.name if coll else "NONE"
        coll_objs = len(coll.objects) if coll else 0
        print(f" {ob.name} -> collection '{coll_name}' ({coll_objs} objects)")

# Mesh statistics
print("\n--- Mesh Statistics ---")
total_verts = 0
total_polys = 0
total_loops = 0
large_meshes = []
invalid_meshes = []
meshes_no_mat = []
meshes_many_mats = []

for me in bpy.data.meshes:
    # NOTE(review): local name `np` shadows the conventional numpy alias
    # (numpy is not imported here, so it is harmless).
    nv = len(me.vertices)
    np = len(me.polygons)
    nl = len(me.loops)
    total_verts += nv
    total_polys += np
    total_loops += nl
    if nv > 10000:
        large_meshes.append((me.name, nv, np))
    if len(me.materials) == 0 and np > 0:
        meshes_no_mat.append(me.name)
    if len(me.materials) > 10:
        meshes_many_mats.append((me.name, len(me.materials)))
    # Validate — note this MUTATES the mesh when it finds problems.
    is_valid = me.validate(verbose=False)
    if is_valid:  # validate returns True if it fixed something
        invalid_meshes.append(me.name)

print(f"Total vertices: {total_verts}")
print(f"Total polygons: {total_polys}")
print(f"Total loops: {total_loops}")

if large_meshes:
    print(f"\nLarge meshes (>10k verts):")
    for name, nv, np in sorted(large_meshes, key=lambda x: -x[1]):
        print(f" {name}: {nv} verts, {np} polys")

if invalid_meshes:
    print(f"\nMeshes with validation issues ({len(invalid_meshes)}):")
    for name in invalid_meshes[:20]:
        print(f" {name}")

if meshes_no_mat:
    print(f"\nMeshes with polygons but no materials ({len(meshes_no_mat)}):")
    for name in meshes_no_mat[:20]:
        print(f" {name}")

if meshes_many_mats:
    print(f"\nMeshes with >10 material slots:")
    for name, count in meshes_many_mats:
        print(f" {name}: {count} materials")

# Object hierarchy depth
def max_depth(ob, d=0):
    # Recursive depth of the parenting tree rooted at `ob`.
    children = [c for c in bpy.data.objects if c.parent == ob]
    if not children:
        return d
    return max(max_depth(c, d+1) for c in children)

roots = [ob for ob in bpy.data.objects if ob.parent is None]
print(f"\nRoot objects: {len(roots)}")
deepest = 0
deepest_name = ""
for r in roots:
    d = max_depth(r)
    if d > deepest:
        deepest = d
        deepest_name = r.name
print(f"Max hierarchy depth: {deepest} (from '{deepest_name}')")

# Hidden objects
hidden = sum(1 for ob in bpy.data.objects if ob.hide_viewport or ob.hide_get())
print(f"Hidden objects: {hidden}")

# Objects with negative scale (can cause rendering issues)
neg_scale = [ob for ob in bpy.data.objects if any(s < 0 for s in ob.scale)]
if neg_scale:
    print(f"\nObjects with negative scale ({len(neg_scale)}):")
    for ob in neg_scale[:10]:
        print(f" {ob.name}: scale={tuple(ob.scale)}")

# Check render engine
print(f"\nRender engine: {bpy.context.scene.render.engine}")

# Material issues
print("\n--- Material Diagnostics ---")
for mat in bpy.data.materials:
    issues = []
    if mat.node_tree:
        for node in mat.node_tree.nodes:
            if node.type == 'TEX_IMAGE':
                if node.image is None:
                    issues.append("Image Texture node with no image")
                elif node.image.packed_file is None:
                    issues.append(f"Unpacked image: {node.image.name}")
                elif node.image.size[0] == 0 or node.image.size[1] == 0:
                    issues.append(f"Zero-size image: {node.image.name}")
    if issues:
        print(f" {mat.name}: {', '.join(issues)}")

# Check for extremely small images (potential texture issues)
print("\n--- Small Textures (may look wrong) ---")
for img in bpy.data.images:
    if img.name == "Render Result":
        continue
    w, h = img.size
    if w > 0 and h > 0 and (w < 16 or h < 16):
        users = sum(1 for mat in bpy.data.materials if mat.node_tree and
                    any(n.type == 'TEX_IMAGE' and n.image == img for n in mat.node_tree.nodes))
        print(f" {img.name}: {w}x{h} (used by {users} material(s))")

# Depsgraph evaluation
print("\n--- Scene evaluation ---")
dg = bpy.context.evaluated_depsgraph_get()
print(f"Depsgraph updates: {len(dg.updates)}")
eval_objects = sum(1 for _ in dg.object_instances)
print(f"Evaluated object instances (with duplis): {eval_objects}")

print("\n" + "=" * 70)
print("DIAGNOSTICS COMPLETE")
print("=" * 70)
diff --git a/skp2blend/inspect_blend.py b/skp2blend/inspect_blend.py
new file mode 100644
index 0000000..03fe6a7
--- /dev/null
+++ b/skp2blend/inspect_blend.py
@@ -0,0 +1,32 @@
# Quick material/texture/UV inspection of a .blend file.
# NOTE(review): the .blend path is hard-coded, same as diagnose_blend.py.
import bpy
bpy.ops.wm.open_mainfile(filepath="/data/kornlada.blend")

print("MATERIAL_COUNT:", len(bpy.data.materials))
print("IMAGE_COUNT:", len(bpy.data.images))
print("MESH_COUNT:", len(bpy.data.meshes))

# One line per material: whether it has an Image Texture node and the
# image's packed state.
for m in bpy.data.materials:
    has_tex = False
    tex_info = ""
    if m.node_tree:
        for n in m.node_tree.nodes:
            if n.type == "TEX_IMAGE":
                has_tex = True
                if n.image:
                    tex_info = n.image.name + " packed=" + str(n.image.packed_file is not None)
                else:
                    tex_info = "NO_IMAGE_SET"
    print("MAT:", m.name, "| has_tex:", has_tex, "|", tex_info)

for img in bpy.data.images:
    print("IMG:", img.name, "| packed:", img.packed_file is not None, "| size:", img.size[0], "x", img.size[1])

# Count meshes with/without UV layers.
uv = 0
no_uv = 0
for me in bpy.data.meshes:
    if me.uv_layers:
        uv += 1
    else:
        no_uv += 1
print("MESHES_WITH_UV:", uv)
print("MESHES_WITHOUT_UV:", no_uv)
diff --git a/skp2blend/intermediate.py b/skp2blend/intermediate.py
new file mode 100644
index 0000000..cd66183
--- /dev/null
+++ b/skp2blend/intermediate.py
@@ -0,0 +1,206 @@
"""Shared intermediate JSON schema for the two-stage SKP-to-Blend converter.

Pure Python — no external dependencies. Imported by both skp_extractor.py
(Stage 1, runs under Wine) and blend_builder.py (Stage 2, runs under Blender).
"""

import json
import os

# Bump when the intermediate JSON layout changes incompatibly.
SCHEMA_VERSION = 1


# ---------------------------------------------------------------------------
# Data helpers — plain dicts are used instead of dataclasses so the module
# works on the Windows-embeddable Python 3.11 distribution (no pip).
# ---------------------------------------------------------------------------

def make_texture_record(
    filename,
    width,
    height,
    s_scale=1.0,
    t_scale=1.0,
    use_alpha_channel=False,
):
    """Create a texture record (filename is relative to the work dir's
    textures/ directory; s/t scales are the SketchUp UV tiling factors)."""
    return {
        "filename": filename,
        "width": width,
        "height": height,
        "s_scale": s_scale,
        "t_scale": t_scale,
        "use_alpha_channel": use_alpha_channel,
    }


def make_material_record(
    name,
    color_rgba,
    opacity=1.0,
    texture=None,
):
    """Create a material record.

    Parameters
    ----------
    color_rgba : list[int]
        [R, G, B, A] with values 0-255.
    opacity : float
        0.0 (transparent) – 1.0 (opaque).
    texture : dict | None
        Result of ``make_texture_record`` or ``None``.
    """
    return {
        "name": name,
        "color_rgba": list(color_rgba),
        "opacity": opacity,
        "texture": texture,
    }


def make_mesh_data(
    vertices,
    triangles,
    uvs_per_triangle,
    triangle_material_indices,
    triangle_smooth,
    face_materials,
):
    """Create a mesh-data record.

    Parameters
    ----------
    vertices : list[list[float]]
        [[x, y, z], ...] in **meters**.
    triangles : list[list[int]]
        [[i0, i1, i2], ...] vertex-index triples.
    uvs_per_triangle : list[list[float]]
        [[u0, v0, u1, v1, u2, v2], ...] — six floats per triangle matching
        the existing ``uv_list`` format in ``write_mesh_data``.
    triangle_material_indices : list[int]
        Per-triangle material slot index into *face_materials*.
    triangle_smooth : list[bool]
        Per-triangle smooth flag.
    face_materials : list[str]
        Ordered list of material names corresponding to slot indices.
    """
    return {
        "vertices": vertices,
        "triangles": triangles,
        "uvs_per_triangle": uvs_per_triangle,
        "triangle_material_indices": triangle_material_indices,
        "triangle_smooth": triangle_smooth,
        "face_materials": face_materials,
    }


def make_entity_node(
    node_type,
    name,
    transform=None,
    material_name=None,
    layer_name=None,
    hidden=False,
    definition_name=None,
    mesh=None,
    children=None,
):
    """Create a recursive entity-tree node.

    Parameters
    ----------
    node_type : str
        One of ``"root"``, ``"group"``, ``"component_instance"``.
    transform : list[list[float]] | None
        4x4 row-major matrix (same layout as ``sketchup.pyx`` returns).
    mesh : dict | None
        Result of ``make_mesh_data`` or ``None``.
    children : list[dict] | None
        Nested ``make_entity_node`` dicts.
    """
    return {
        "type": node_type,
        "name": name,
        "transform": transform,
        "material_name": material_name,
        "layer_name": layer_name,
        "hidden": hidden,
        "definition_name": definition_name,
        "mesh": mesh,
        "children": children or [],
    }


def make_camera_record(position, target, up, fov, perspective, aspect_ratio):
    """Create a camera record.

    Parameters
    ----------
    position, target, up : list[float]
        3-element lists in **meters**.
    fov : float | None
        Field of view in degrees, or ``None`` for orthographic.
    perspective : bool
    aspect_ratio : float | None
        ``None`` when the camera uses the dynamic/screen ratio.
    """
    return {
        "position": list(position),
        "target": list(target),
        "up": list(up),
        "fov": fov,
        "perspective": perspective,
        "aspect_ratio": aspect_ratio,
    }


def make_scene_record(name, camera, hidden_layer_names=None):
    """Create a named-scene record (camera + layers the scene hides)."""
    return {
        "name": name,
        "camera": camera,
        "hidden_layer_names": hidden_layer_names or [],
    }


def make_component_def_record(name, depth):
    """Create a component-definition record (depth = nesting level)."""
    return {"name": name, "depth": depth}


# ---------------------------------------------------------------------------
# Top-level document
# ---------------------------------------------------------------------------

def make_intermediate(
    materials,
    component_definitions,
    entity_tree,
    cameras,
    scenes=None,
):
    """Build the complete intermediate dict ready for JSON serialization."""
    return {
        "schema_version": SCHEMA_VERSION,
        "materials": materials,
        "component_definitions": component_definitions,
        "entity_tree": entity_tree,
        "cameras": cameras,
        "scenes": scenes or [],
    }


# ---------------------------------------------------------------------------
# I/O
# ---------------------------------------------------------------------------

def save_intermediate(data, work_dir):
    """Write *data* to ``<work_dir>/intermediate.json`` and return the path."""
    os.makedirs(work_dir, exist_ok=True)
    path = os.path.join(work_dir, "intermediate.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False)
    return path


def load_intermediate(work_dir):
    """Read and return the intermediate dict from *work_dir*."""
    path = os.path.join(work_dir, "intermediate.json")
    with open(path, encoding="utf-8") as f:
        return json.load(f)
diff --git a/skp2blend/obj_builder.py b/skp2blend/obj_builder.py
new file mode 100644
index 0000000..3676477
--- /dev/null
+++ b/skp2blend/obj_builder.py
@@ -0,0 +1,287 @@
#!/usr/bin/env python3
"""Stage 2b — Build a Wavefront OBJ (+MTL) file from intermediate JSON.

Pure Python — no Blender or numpy dependency.

Usage::

    python3 obj_builder.py <work_dir> <output.obj> [--scene NAME]

Reads ``intermediate.json`` from *work_dir*, flattens the entity tree
into world-space geometry, and writes ``<name>.obj`` + ``<name>.mtl``
alongside each other. Texture files are copied into a ``textures/``
directory next to the OBJ.
"""

import argparse
import os
import shutil
import sys

# Make sibling modules importable when run as a standalone script.
_this_dir = os.path.dirname(os.path.abspath(__file__))
if _this_dir not in sys.path:
    sys.path.insert(0, _this_dir)

from intermediate import load_intermediate  # noqa: E402, I001
from skputil import DEFAULT_MATERIAL_NAME, inherent_default_mat  # noqa: E402


# ---------------------------------------------------------------------------
# Pure-Python 4x4 matrix math (list-of-lists, row-major)
# ---------------------------------------------------------------------------

_IDENTITY = [
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
]


def mat4_multiply(a, b):
    """Multiply two 4x4 matrices (list-of-4-lists-of-4-floats).

    Returns a new 4x4 matrix ``a @ b``; neither input is modified.
    """
    result = [[0.0] * 4 for _ in range(4)]
    for i in range(4):
        for j in range(4):
            s = 0.0
            for k in range(4):
                s += a[i][k] * b[k][j]
            result[i][j] = s
    return result


def mat4_transform_point(m, p):
    """Transform a 3D point *p* by 4x4 matrix *m* (homogeneous, w=1).

    Returns a new ``[x, y, z]`` list; the w row is ignored (affine use).
    """
    x = m[0][0] * p[0] + m[0][1] * p[1] + m[0][2] * p[2] + m[0][3]
    y = m[1][0] * p[0] + m[1][1] * p[1] + m[1][2] * p[2] + m[1][3]
    z = m[2][0] * p[0] + m[2][1] * p[1] + m[2][2] * p[2] + m[2][3]
    return [x, y, z]


# ---------------------------------------------------------------------------
# MTL writer
# ---------------------------------------------------------------------------

def write_mtl(material_records, textures_src_dir, mtl_path, output_dir):
    """Write a Wavefront .mtl file and copy referenced textures.

    Parameters
    ----------
    material_records : list[dict]
        Records from the intermediate JSON (``make_material_record``).
    textures_src_dir : str
        Directory holding the extracted texture files.
    mtl_path : str
        Destination path of the .mtl file.
    output_dir : str
        Directory of the OBJ; textures are copied to ``textures/`` inside it.

    Returns a set of material names that were written.
    """
    textures_dst_dir = os.path.join(output_dir, "textures")
    written_names = set()

    with open(mtl_path, "w", encoding="utf-8") as f:
        # Default material — always present so faces without an explicit
        # material still resolve.
        f.write(f"newmtl {DEFAULT_MATERIAL_NAME}\n")
        f.write("Kd 0.8 0.8 0.8\n")
        f.write("d 1.0\n\n")
        written_names.add(DEFAULT_MATERIAL_NAME)

        for rec in material_records:
            name = rec["name"]
            r, g, b, a = rec["color_rgba"]
            # Fall back to the color's alpha channel when no explicit
            # opacity was recorded.
            opacity = rec.get("opacity", round(a / 255.0, 4))
            tex = rec.get("texture")

            f.write(f"newmtl {name}\n")
            f.write(f"Kd {r / 255.0:.6f} {g / 255.0:.6f} {b / 255.0:.6f}\n")
            f.write(f"d {opacity}\n")

            if tex:
                tex_filename = tex["filename"]
                src = os.path.join(textures_src_dir, tex_filename)
                if os.path.isfile(src):
                    os.makedirs(textures_dst_dir, exist_ok=True)
                    dst = os.path.join(textures_dst_dir, tex_filename)
                    if not os.path.isfile(dst):
                        shutil.copy2(src, dst)
                    f.write(f"map_Kd textures/{tex_filename}\n")
                else:
                    # Missing texture is non-fatal: material keeps its Kd color.
                    print(f"OBJ | Warning: texture not found: {src}")

            f.write("\n")
            written_names.add(name)

    return written_names


# ---------------------------------------------------------------------------
# OBJ writer
# ---------------------------------------------------------------------------

def write_obj(data, obj_path, mtl_filename):
    """Write a Wavefront .obj file from intermediate data.

    Flattens the entity tree, baking transforms into world-space vertex
    positions. Each mesh leaf becomes a named ``o`` block. All geometry
    is included regardless of tag/layer visibility — OBJ has no concept
    of collection exclusion.

    Parameters
    ----------
    data : dict
        The loaded intermediate document.
    obj_path : str
        Destination .obj path.
    mtl_filename : str
        Name referenced by the ``mtllib`` statement (file written separately).
    """
    entity_tree = data["entity_tree"]

    # Collected geometry: list of dicts with keys:
    # name, vertices (world-space), triangles, uvs, tri_mat_names
    meshes = []

    _walk_entities(
        entity_tree,
        _IDENTITY,
        DEFAULT_MATERIAL_NAME,
        meshes,
    )

    # Write OBJ
    v_offset = 1  # OBJ indices are 1-based
    vt_offset = 1
    with open(obj_path, "w", encoding="utf-8") as f:
        f.write("# Exported by skp2blend obj_builder\n")
        f.write(f"mtllib {mtl_filename}\n\n")

        for mesh_entry in meshes:
            obj_name = mesh_entry["name"]
            verts = mesh_entry["vertices"]
            tris = mesh_entry["triangles"]
            uvs = mesh_entry["uvs"]
            tri_mat_names = mesh_entry["tri_mat_names"]
            has_uvs = bool(uvs) and any(uvs)

            f.write(f"o {obj_name}\n")

            # Vertices — convert Z-up (SketchUp/Blender) to Y-up (OBJ convention)
            for v in verts:
                f.write(f"v {v[0]:.6f} {v[2]:.6f} {-v[1]:.6f}\n")

            # UVs — three vt entries per triangle; placeholder zeros keep
            # the vt indexing aligned when a triangle has no UV data.
            if has_uvs:
                for i, uv_data in enumerate(uvs):
                    if uv_data:
                        # Each entry is [u0, v0, u1, v1, u2, v2]
                        f.write(f"vt {uv_data[0]:.6f} {uv_data[1]:.6f}\n")
                        f.write(f"vt {uv_data[2]:.6f} {uv_data[3]:.6f}\n")
                        f.write(f"vt {uv_data[4]:.6f} {uv_data[5]:.6f}\n")
                    else:
                        f.write("vt 0.0 0.0\n")
                        f.write("vt 0.0 0.0\n")
                        f.write("vt 0.0 0.0\n")

            # Faces grouped by material — emit usemtl only on changes.
            current_mat = None
            for tri_idx, tri in enumerate(tris):
                mat_name = tri_mat_names[tri_idx] if tri_idx < len(tri_mat_names) else DEFAULT_MATERIAL_NAME
                if mat_name != current_mat:
                    f.write(f"usemtl {mat_name}\n")
                    current_mat = mat_name

                if has_uvs:
                    vt0 = vt_offset + tri_idx * 3
                    f.write(
                        f"f {tri[0] + v_offset}/{vt0}"
                        f" {tri[1] + v_offset}/{vt0 + 1}"
                        f" {tri[2] + v_offset}/{vt0 + 2}\n"
                    )
                else:
                    f.write(
                        f"f {tri[0] + v_offset}"
                        f" {tri[1] + v_offset}"
                        f" {tri[2] + v_offset}\n"
                    )

            # Advance the global 1-based index counters for the next block.
            v_offset += len(verts)
            if has_uvs:
                vt_offset += len(tris) * 3

            f.write("\n")

    print(f"OBJ | Wrote {len(meshes)} object(s), {v_offset - 1} vertices total")


def _walk_entities(node,
def _walk_entities(node, parent_mat, default_material, meshes):
    """Recursively walk the entity tree, collecting flattened mesh data.

    Accumulates transforms down the tree (parent × node) so leaf vertices
    can be baked into world space. Hidden nodes are skipped along with
    their entire subtree. Appends to *meshes* via :func:`_emit_mesh`.
    """
    if node.get("hidden"):
        return

    # Compute this node's world transform
    node_transform = node.get("transform")
    if node_transform:
        world_mat = mat4_multiply(parent_mat, node_transform)
    else:
        world_mat = parent_mat

    # Emit mesh if present
    mesh_data = node.get("mesh")
    if mesh_data and mesh_data.get("vertices"):
        _emit_mesh(node["name"], mesh_data, world_mat, default_material, meshes)

    # Recurse into children — each child's own material (if any) becomes
    # the inherited default for its subtree.
    for child in node.get("children", []):
        child_mat = inherent_default_mat(child.get("material_name"), default_material)
        _walk_entities(child, world_mat, child_mat, meshes)


def _emit_mesh(name, mesh_data, world_mat, default_material, meshes):
    """Transform vertices to world space and resolve material names.

    Appends one dict (keys: name, vertices, triangles, uvs, tri_mat_names)
    to *meshes*.
    """
    verts = mesh_data["vertices"]
    tris = mesh_data["triangles"]
    uvs = mesh_data.get("uvs_per_triangle", [])
    mat_indices = mesh_data.get("triangle_material_indices", [])
    face_materials = mesh_data.get("face_materials", [])

    # Transform vertices to world space
    world_verts = [mat4_transform_point(world_mat, v) for v in verts]

    # Resolve per-triangle material names
    tri_mat_names = []
    for idx in mat_indices:
        if idx < len(face_materials):
            mat_name = face_materials[idx]
            # Apply material inheritance
            mat_name = inherent_default_mat(mat_name, default_material)
            tri_mat_names.append(mat_name)
        else:
            # Out-of-range index: fall back to the inherited default.
            tri_mat_names.append(default_material)

    meshes.append({
        "name": name,
        "vertices": world_verts,
        "triangles": tris,
        "uvs": uvs,
        "tri_mat_names": tri_mat_names,
    })
def main():
    """CLI entry point: build ``<stem>.obj`` + ``<stem>.mtl`` from a work dir."""
    arg_parser = argparse.ArgumentParser(description="Build a Wavefront OBJ from intermediate JSON")
    arg_parser.add_argument("work_dir", help="Directory containing intermediate.json and textures/")
    arg_parser.add_argument("output_obj", help="Output .obj file path")
    arg_parser.add_argument("--scene", type=str, default="", help="Import a specific named scene")
    opts = arg_parser.parse_args()

    print(f"OBJ | Loading intermediate data from {opts.work_dir}")
    doc = load_intermediate(opts.work_dir)

    # Derive the .mtl path from the .obj path: same directory, same stem.
    obj_path = os.path.abspath(opts.output_obj)
    output_dir = os.path.dirname(obj_path)
    stem = os.path.splitext(os.path.basename(obj_path))[0]
    mtl_filename = stem + ".mtl"
    mtl_path = os.path.join(output_dir, mtl_filename)

    # Materials first so the OBJ can reference the .mtl by filename.
    textures_src_dir = os.path.join(opts.work_dir, "textures")
    print(f"OBJ | Writing materials to {mtl_path}")
    written_mats = write_mtl(doc["materials"], textures_src_dir, mtl_path, output_dir)
    print(f"OBJ | {len(written_mats)} material(s)")

    # Then the geometry itself.
    print(f"OBJ | Writing geometry to {obj_path}")
    write_obj(doc, obj_path, mtl_filename)

    print(f"OBJ | Done: {obj_path}")
def main():
    """Render one PNG still of the currently-open .blend file.

    Expects the output PNG path as the first argument after the ``--``
    separator on the Blender command line. Adds a sun lamp and a light
    world background so unlit models are visible, then renders a single
    1920x1080 frame with EEVEE.
    """
    argv = sys.argv
    # Blender forwards script arguments after a literal "--".
    if "--" in argv:
        argv = argv[argv.index("--") + 1:]
    else:
        argv = []

    if not argv:
        print("Usage: blender --background file.blend --python render_preview.py -- output.png", file=sys.stderr)
        sys.exit(1)

    output_path = argv[0]

    scene = bpy.context.scene

    # --- Lighting ---
    # Add a sun light for key illumination
    sun_data = bpy.data.lights.new("Preview Sun", type="SUN")
    sun_data.energy = 3.0
    sun_obj = bpy.data.objects.new("Preview Sun", sun_data)
    bpy.context.collection.objects.link(sun_obj)
    sun_obj.rotation_euler = (math.radians(45), math.radians(15), math.radians(30))

    # Light world background for ambient fill
    world = bpy.data.worlds.get("World") or bpy.data.worlds.new("World")
    scene.world = world
    if not world.node_tree:
        world.use_nodes = True
    bg = world.node_tree.nodes.get("Background")
    if bg:
        bg.inputs["Color"].default_value = (0.7, 0.75, 0.8, 1.0)
        bg.inputs["Strength"].default_value = 0.5

    # --- Render settings ---
    scene.render.resolution_x = 1920
    scene.render.resolution_y = 1080
    scene.render.resolution_percentage = 100
    scene.render.image_settings.file_format = "PNG"
    scene.render.filepath = output_path

    # Prefer EEVEE for speed; the engine should already be set by blend_builder
    # but ensure it in case the file was created differently.
    #
    # The EEVEE engine identifier changed over time: "BLENDER_EEVEE" before
    # 4.2, "BLENDER_EEVEE_NEXT" for 4.2-4.4, and back to "BLENDER_EEVEE"
    # from 4.5 onward. The previous check (>= 5.0) picked the nonexistent
    # identifier on Blender < 4.2 and on 4.5.x, raising a TypeError when
    # assigning the enum.
    if (4, 2, 0) <= bpy.app.version < (4, 5, 0):
        scene.render.engine = "BLENDER_EEVEE_NEXT"
    else:
        scene.render.engine = "BLENDER_EEVEE"

    # --- Render ---
    bpy.ops.render.render(write_still=True)
    print(f"Preview saved to {output_path}")
def extract_mesh(entities, name, default_material, material_scales):
    """Return a ``MeshData`` dict for the faces of *entities*, or ``None``.

    Tessellates every face, de-duplicates vertices across faces via the
    auto-indexing ``keep_offset`` map, and records one UV sextuple plus a
    material index and smooth flag per output triangle. *material_scales*
    supplies (s, t) texture scales keyed by material name.
    """
    verts = []            # de-duplicated vertex positions
    loops_vert_idx = []   # flat vertex indices, 3 per triangle
    mat_index = []        # per-triangle index into the material table
    smooth = []           # per-triangle smooth flag
    mats = keep_offset()  # material name -> dense index
    seen = keep_offset()  # vertex tuple -> dense vertex index
    uv_list = []          # per-triangle [u0, v0, u1, v1, u2, v2]

    for f in entities.faces:
        if f.material:
            mat_number = mats[f.material.name]
        else:
            mat_number = mats[default_material]
            if default_material != DEFAULT_MATERIAL_NAME:
                # Inherited material: push its texture scale onto the face
                # before tessellation so UVs come out pre-scaled.
                try:
                    f.st_scale = material_scales[default_material]
                except KeyError:
                    pass

        vs, tri, uvs = f.tessfaces

        mapping = {}
        for i, (v, uv) in enumerate(zip(vs, uvs)):
            prev_len = len(seen)
            mapping[i] = seen[v]
            if len(seen) > prev_len:
                verts.append(list(v))
                # NOTE(review): appending to ``uvs`` while zipping over it
                # looks suspicious; it appears harmless only because zip
                # stops at len(vs) and triangle indices stay below the
                # original length — confirm against the upstream importer.
                uvs.append(uv)

        # A single smooth edge marks the whole face's triangles smooth.
        smooth_edge = False
        for edge in f.edges:
            if edge.GetSmooth():
                smooth_edge = True
                break

        for face in tri:
            f0, f1, f2 = face[0], face[1], face[2]

            # NOTE(review): triangles whose last corner maps to vertex 0
            # are rotated; presumably mirrors the original importer's
            # loop-ordering workaround — confirm before changing.
            if mapping[f2] == 0:
                loops_vert_idx.extend([mapping[f2], mapping[f0], mapping[f1]])
                uv_list.append([
                    uvs[f2][0], uvs[f2][1],
                    uvs[f0][0], uvs[f0][1],
                    uvs[f1][0], uvs[f1][1],
                ])
            else:
                loops_vert_idx.extend([mapping[f0], mapping[f1], mapping[f2]])
                uv_list.append([
                    uvs[f0][0], uvs[f0][1],
                    uvs[f1][0], uvs[f1][1],
                    uvs[f2][0], uvs[f2][1],
                ])

            smooth.append(smooth_edge)
            mat_index.append(mat_number)

    if not verts:
        return None

    # Build the ordered material-name list (same order as the original code)
    mats_sorted = OrderedDict(sorted(mats.items(), key=lambda x: x[1]))
    face_materials = list(mats_sorted.keys())

    # Re-group the flat index list into [i0, i1, i2] triangles.
    triangles = list(zip(*[iter(loops_vert_idx)] * 3))
    triangles = [list(t) for t in triangles]

    return make_mesh_data(
        vertices=verts,
        triangles=triangles,
        uvs_per_triangle=uv_list,
        triangle_material_indices=mat_index,
        triangle_smooth=smooth,
        face_materials=face_materials,
    )
def extract_entity_tree(entities, skp_components, material_scales):
    """Build the root EntityNode tree from the model's top-level entities.

    Recursively walks groups and component instances, resolving material
    inheritance top-down and skipping anything marked hidden in SketchUp.

    :param entities: SDK entities collection of the model root.
    :param skp_components: dict of definition name -> SDK definition object.
    :param material_scales: name -> (s, t) scales, forwarded to mesh extraction.
    """

    def walk(entities, name, default_material, node_type):
        # Geometry directly owned by this container (may be None).
        mesh = extract_mesh(entities, name, default_material, material_scales)

        children = []

        for group in entities.groups:
            if group.hidden:
                continue
            # The group's own material (if any) becomes the default for
            # its subtree; otherwise ours is inherited.
            gmat = _inherent(_mat_name_from_obj(group), default_material)
            child = walk(
                group.entities,
                "G-" + group.name,
                gmat,
                "group",
            )
            child["transform"] = group.transform
            child["material_name"] = _mat_name_from_obj(group)
            child["layer_name"] = _layer_name_from_obj(group)
            child["hidden"] = group.hidden
            children.append(child)

        for instance in entities.instances:
            if instance.hidden:
                continue
            imat = _inherent(_mat_name_from_obj(instance), default_material)
            cdef = skp_components.get(instance.definition.name)
            if cdef is None:
                # Definition missing from the model's table — skip rather
                # than crash; the instance cannot be resolved.
                continue
            if instance.name:
                cname = instance.name + " (C-" + cdef.name + ")"
            else:
                cname = "C-" + cdef.name
            # NOTE(review): each instance re-walks its definition's
            # entities, so shared definitions are duplicated per instance
            # in the serialised tree.
            child = walk(
                cdef.entities,
                cname,
                imat,
                "component_instance",
            )
            child["transform"] = instance.transform
            child["material_name"] = _mat_name_from_obj(instance)
            child["layer_name"] = _layer_name_from_obj(instance)
            child["hidden"] = instance.hidden
            child["definition_name"] = cdef.name
            children.append(child)

        return make_entity_node(
            node_type=node_type,
            name=name,
            mesh=mesh,
            children=children,
        )

    return walk(entities, "_(Loose Entity)", DEFAULT_MATERIAL_NAME, "root")
def _live_component_deps(entities, comp=True):
    """Return the component nesting depth of *entities* (live SDK objects).

    *comp* is True when *entities* belongs to a component definition, so
    the container itself counts as one level; groups are transparent and
    contribute only their contents' depth. Each component instance adds
    one level plus the depth of its definition's contents.
    """
    own_depth = 1 if comp else 0

    # Groups do not add a level of their own.
    group_depth = 0
    for group in entities.groups:
        group_depth = max(group_depth, _live_component_deps(group.entities, comp=False))

    # Instances add one level on top of their definition's depth.
    instance_depth = 0
    for instance in entities.instances:
        instance_depth = max(
            instance_depth,
            1 + _live_component_deps(instance.definition.entities),
        )

    return max(own_depth, group_depth, instance_depth)
skp_log("Analyzing component definitions...") + skp_components = {} + for c in model.component_definitions: + skp_components[c.name] = c + + comp_defs = [] + for c in model.component_definitions: + depth = _live_component_deps(c.entities) + comp_defs.append(make_component_def_record(c.name, depth)) + skp_log(f" {len(comp_defs)} component definition(s)") + + # --- Entity tree --- + skp_log("Extracting entity tree...") + entity_tree = extract_entity_tree(model.entities, skp_components, material_scales) + + # --- Cameras --- + skp_log("Extracting cameras...") + cameras = [extract_camera(model.camera)] + + # --- Scenes --- + skp_log("Extracting scenes...") + scenes = extract_scenes(model) + skp_log(f" {len(scenes)} scene(s)") + + # --- Write --- + data = make_intermediate( + materials=materials, + component_definitions=comp_defs, + entity_tree=entity_tree, + cameras=cameras, + scenes=scenes, + ) + out_path = save_intermediate(data, work_dir) + skp_log(f"Wrote {out_path}") + + +if __name__ == "__main__": + main() diff --git a/skp2blend/skputil.py b/skp2blend/skputil.py new file mode 100644 index 0000000..e90fe1d --- /dev/null +++ b/skp2blend/skputil.py @@ -0,0 +1,96 @@ +"""Standalone utility functions used by both extraction and building stages. + +Ported from ``sketchup_importer/SKPutil/__init__.py`` — pure Python, no +``bpy`` or ``sketchup`` SDK imports. 
+""" + +from collections import defaultdict +from enum import Enum + +DEFAULT_MATERIAL_NAME = "DefaultMaterial" + +_su_group_num = 0 + + +class proxy_dict(dict): + """Dictionary that transparently strips a ``_proxy`` suffix on lookup.""" + + def __getitem__(self, key): + if key.lower().endswith("_proxy"): + try: + return dict.__getitem__(self, key[:-6]) + except KeyError: + return dict.__getitem__(self, key) + try: + return dict.__getitem__(self, key) + except KeyError: + print(f"SU | KeyError: {key}, Skipping...") + return None + + +class keep_offset(defaultdict): + """Auto-incrementing index map — identical semantics to the original.""" + + def __init__(self): + defaultdict.__init__(self, int) + + def __missing__(self, _): + return defaultdict.__len__(self) + + def __getitem__(self, item): + number = defaultdict.__getitem__(self, item) + self[item] = number + return number + + +def group_name(name, material): + if material != DEFAULT_MATERIAL_NAME: + return f"{name}_{material}" + return name + + +def group_safe_name(name): + if not name: + global _su_group_num + _su_group_num += 1 + padded = f"{_su_group_num:03d}" + return f"{name}No_Name_{padded}" + return name + + +def inherent_default_mat(mat_name, default_material): + """Resolve the effective material name. + + Unlike the original which receives a Material SDK object, this version + takes the material *name* (a string or ``None``). 
+ """ + if mat_name is None: + mat_name = default_material + if mat_name == DEFAULT_MATERIAL_NAME and default_material != DEFAULT_MATERIAL_NAME: + mat_name = default_material + return mat_name + + +class EntityType(Enum): + none = 0 + group = 1 + component = 2 + outer = 3 + + +# --------------------------------------------------------------------------- +# Component-depth analysis — operates on the intermediate EntityNode tree +# --------------------------------------------------------------------------- + +def component_deps(node): + """Return the nesting depth of components under *node* (an EntityNode dict). + + This mirrors ``SKP_util.component_deps`` but works on the serialised + intermediate tree rather than live SDK objects. + """ + is_component = node.get("type") == "component_instance" + own_depth = 1 if is_component else 0 + child_depth = 0 + for child in node.get("children", []): + child_depth = max(child_depth, component_deps(child)) + return max(own_depth, child_depth) diff --git a/skp2blend/verify_fix.py b/skp2blend/verify_fix.py new file mode 100644 index 0000000..a9b1c3e --- /dev/null +++ b/skp2blend/verify_fix.py @@ -0,0 +1,30 @@ +import bpy +bpy.ops.wm.open_mainfile(filepath="/data/kornlada.blend") + +print("=== Timmer Gavelsida materials ===") +for m in bpy.data.materials: + if "Timmer" in m.name: + tex_info = "no texture" + if m.node_tree: + for n in m.node_tree.nodes: + if n.type == "TEX_IMAGE" and n.image: + tex_info = f"{n.image.name} {n.image.size[0]}x{n.image.size[1]} packed={n.image.packed_file is not None}" + print(f" {m.name:30s} {tex_info}") + +print() +print("=== Summary ===") +print(f"Materials: {len(bpy.data.materials)}") +print(f"Images: {len(bpy.data.images)}") + +uv = sum(1 for me in bpy.data.meshes if me.uv_layers) +no_uv = sum(1 for me in bpy.data.meshes if not me.uv_layers) +print(f"Meshes with UV: {uv}") +print(f"Meshes without UV: {no_uv}") + +# Verify all texture images have distinct content (unique sizes) +print() 
+print("=== All texture images and sizes ===") +for img in sorted(bpy.data.images, key=lambda i: i.name): + if img.name == "Render Result": + continue + print(f" {img.name:55s} {img.size[0]:5d}x{img.size[1]:<5d} packed={img.packed_file is not None}")