Mirror of https://github.com/Ultimaker/Cura.git
synced 2025-08-08 14:34:01 -06:00
Merge branch 'master' into python_type_hinting
This commit is contained in: commit 98a6568313
416 changed files with 160213 additions and 173190 deletions
@@ -8,15 +8,20 @@ from UM.Math.Matrix import Matrix
from UM.Math.Vector import Vector
from UM.Scene.SceneNode import SceneNode
from UM.Scene.GroupDecorator import GroupDecorator
from UM.Math.Quaternion import Quaternion
import UM.Application
from UM.Job import Job
from cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator
from UM.Application import Application
from cura.Settings.ExtruderManager import ExtruderManager
from cura.QualityManager import QualityManager

import math
import os.path
import zipfile

try:
    import xml.etree.cElementTree as ET
except ImportError:
    Logger.log("w", "Unable to load cElementTree, switching to slower version")
    import xml.etree.ElementTree as ET

## Base implementation for reading 3MF files. Has no support for textures. Only loads meshes!

@@ -24,104 +29,225 @@ class ThreeMFReader(MeshReader):
def __init__(self):
super().__init__()
self._supported_extensions = [".3mf"]

self._root = None
self._namespaces = {
"3mf": "http://schemas.microsoft.com/3dmanufacturing/core/2015/02",
"cura": "http://software.ultimaker.com/xml/cura/3mf/2015/10"
}
self._base_name = ""
self._unit = None

def _createNodeFromObject(self, object, name = ""):
node = SceneNode()
node.setName(name)
mesh_builder = MeshBuilder()
vertex_list = []

components = object.find(".//3mf:components", self._namespaces)
if components:
for component in components:
id = component.get("objectid")
new_object = self._root.find("./3mf:resources/3mf:object[@id='{0}']".format(id), self._namespaces)
new_node = self._createNodeFromObject(new_object, self._base_name + "_" + str(id))
node.addChild(new_node)
transform = component.get("transform")
if transform is not None:
new_node.setTransformation(self._createMatrixFromTransformationString(transform))

# for vertex in entry.mesh.vertices.vertex:
for vertex in object.findall(".//3mf:vertex", self._namespaces):
vertex_list.append([vertex.get("x"), vertex.get("y"), vertex.get("z")])
Job.yieldThread()

xml_settings = list(object.findall(".//cura:setting", self._namespaces))

# Add the setting override decorator, so we can add settings to this node.
if xml_settings:
node.addDecorator(SettingOverrideDecorator())

global_container_stack = Application.getInstance().getGlobalContainerStack()
# Ensure the correct next container for the SettingOverride decorator is set.
if global_container_stack:
multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1
# Ensure that all extruder data is reset
if not multi_extrusion:
default_stack_id = global_container_stack.getId()
else:
default_stack = ExtruderManager.getInstance().getExtruderStack(0)
if default_stack:
default_stack_id = default_stack.getId()
else:
default_stack_id = global_container_stack.getId()
node.callDecoration("setActiveExtruder", default_stack_id)

# Get the definition & set it
definition = QualityManager.getInstance().getParentMachineDefinition(global_container_stack.getBottom())
node.callDecoration("getStack").getTop().setDefinition(definition)

setting_container = node.callDecoration("getStack").getTop()
for setting in xml_settings:
setting_key = setting.get("key")
setting_value = setting.text

# Extruder_nr is a special case.
if setting_key == "extruder_nr":
extruder_stack = ExtruderManager.getInstance().getExtruderStack(int(setting_value))
if extruder_stack:
node.callDecoration("setActiveExtruder", extruder_stack.getId())
else:
Logger.log("w", "Unable to find extruder in position %s", setting_value)
continue
setting_container.setProperty(setting_key, "value", setting_value)

if len(node.getChildren()) > 0:
group_decorator = GroupDecorator()
node.addDecorator(group_decorator)

triangles = object.findall(".//3mf:triangle", self._namespaces)
mesh_builder.reserveFaceCount(len(triangles))

for triangle in triangles:
v1 = int(triangle.get("v1"))
v2 = int(triangle.get("v2"))
v3 = int(triangle.get("v3"))

mesh_builder.addFaceByPoints(vertex_list[v1][0], vertex_list[v1][1], vertex_list[v1][2],
                             vertex_list[v2][0], vertex_list[v2][1], vertex_list[v2][2],
                             vertex_list[v3][0], vertex_list[v3][1], vertex_list[v3][2])

Job.yieldThread()

# TODO: We currently do not check for normals and simply recalculate them.
mesh_builder.calculateNormals()
mesh_builder.setFileName(name)
mesh_data = mesh_builder.build()

if len(mesh_data.getVertices()):
node.setMeshData(mesh_data)

node.setSelectable(True)
return node

def _createMatrixFromTransformationString(self, transformation):
splitted_transformation = transformation.split()
## Transformation is saved as:
## M00 M01 M02 0.0
## M10 M11 M12 0.0
## M20 M21 M22 0.0
## M30 M31 M32 1.0
## We switch the row & cols as that is how everyone else uses matrices!
temp_mat = Matrix()
# Rotation & Scale
temp_mat._data[0, 0] = splitted_transformation[0]
temp_mat._data[1, 0] = splitted_transformation[1]
temp_mat._data[2, 0] = splitted_transformation[2]
temp_mat._data[0, 1] = splitted_transformation[3]
temp_mat._data[1, 1] = splitted_transformation[4]
temp_mat._data[2, 1] = splitted_transformation[5]
temp_mat._data[0, 2] = splitted_transformation[6]
temp_mat._data[1, 2] = splitted_transformation[7]
temp_mat._data[2, 2] = splitted_transformation[8]

# Translation
temp_mat._data[0, 3] = splitted_transformation[9]
temp_mat._data[1, 3] = splitted_transformation[10]
temp_mat._data[2, 3] = splitted_transformation[11]

return temp_mat
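For reference, the twelve numbers of a 3MF "transform" attribute are copied column by column above. A minimal sketch of the same mapping with numpy, assuming a plain 4x4 array instead of UM's Matrix wrapper (helper name hypothetical):

import numpy

def transform_string_to_matrix(transformation):
    # The first nine values fill the rotation/scale block column-wise,
    # the last three become the translation column.
    values = [float(v) for v in transformation.split()]
    matrix = numpy.identity(4)
    matrix[0:3, 0:3] = numpy.array(values[0:9]).reshape(3, 3).transpose()
    matrix[0:3, 3] = values[9:12]
    return matrix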
def read(self, file_name):
result = SceneNode()
result = []
# The base object of 3mf is a zipped archive.
archive = zipfile.ZipFile(file_name, "r")
self._base_name = os.path.basename(file_name)
try:
root = ET.parse(archive.open("3D/3dmodel.model"))
self._root = ET.parse(archive.open("3D/3dmodel.model"))
self._unit = self._root.getroot().get("unit")

# There can be multiple objects, try to load all of them.
objects = root.findall("./3mf:resources/3mf:object", self._namespaces)
if len(objects) == 0:
Logger.log("w", "No objects found in 3MF file %s, either the file is corrupt or you are using an outdated format", file_name)
return None
build_items = self._root.findall("./3mf:build/3mf:item", self._namespaces)

for entry in objects:
mesh_builder = MeshBuilder()
node = SceneNode()
vertex_list = []
# for vertex in entry.mesh.vertices.vertex:
for vertex in entry.findall(".//3mf:vertex", self._namespaces):
vertex_list.append([vertex.get("x"), vertex.get("y"), vertex.get("z")])
Job.yieldThread()
for build_item in build_items:
id = build_item.get("objectid")
object = self._root.find("./3mf:resources/3mf:object[@id='{0}']".format(id), self._namespaces)
if "type" in object.attrib:
if object.attrib["type"] == "support" or object.attrib["type"] == "other":
# Ignore support objects, as cura does not support these.
# We can't guarantee that they won't be made solid.
# We also ignore "other", as I have no idea what to do with them.
Logger.log("w", "3MF file contained an object of type %s which is not supported by Cura", object.attrib["type"])
continue
elif object.attrib["type"] == "solidsupport" or object.attrib["type"] == "model":
pass # Load these as normal
else:
# We should technically fail at this point because it's an invalid 3MF, but try to continue anyway.
Logger.log("e", "3MF file contained an object of type %s which is not supported by the 3mf spec",
           object.attrib["type"])
continue

triangles = entry.findall(".//3mf:triangle", self._namespaces)
mesh_builder.reserveFaceCount(len(triangles))
build_item_node = self._createNodeFromObject(object, self._base_name + "_" + str(id))
transform = build_item.get("transform")
if transform is not None:
build_item_node.setTransformation(self._createMatrixFromTransformationString(transform))
global_container_stack = UM.Application.getInstance().getGlobalContainerStack()

for triangle in triangles:
v1 = int(triangle.get("v1"))
v2 = int(triangle.get("v2"))
v3 = int(triangle.get("v3"))
# Create a transformation Matrix to convert from 3mf worldspace into ours.
# First step: flip the y and z axis.
transformation_matrix = Matrix()
transformation_matrix._data[1, 1] = 0
transformation_matrix._data[1, 2] = 1
transformation_matrix._data[2, 1] = -1
transformation_matrix._data[2, 2] = 0

mesh_builder.addFaceByPoints(vertex_list[v1][0], vertex_list[v1][1], vertex_list[v1][2],
                             vertex_list[v2][0], vertex_list[v2][1], vertex_list[v2][2],
                             vertex_list[v3][0], vertex_list[v3][1], vertex_list[v3][2])
# Second step: 3MF defines the left corner of the machine as center, whereas cura uses the center of the
# build volume.
if global_container_stack:
translation_vector = Vector(x = -global_container_stack.getProperty("machine_width", "value") / 2,
                            y = -global_container_stack.getProperty("machine_depth", "value") / 2,
                            z = 0)
translation_matrix = Matrix()
translation_matrix.setByTranslation(translation_vector)
transformation_matrix.multiply(translation_matrix)

Job.yieldThread()
# Third step: 3MF also defines a unit, whereas Cura always assumes mm.
scale_matrix = Matrix()
scale_matrix.setByScaleVector(self._getScaleFromUnit(self._unit))
transformation_matrix.multiply(scale_matrix)

# Rotate the model; We use a different coordinate frame.
rotation = Matrix()
rotation.setByRotationAxis(-0.5 * math.pi, Vector(1, 0, 0))
# Pre multiply the transformation with the loaded transformation, so the data is handled correctly.
build_item_node.setTransformation(build_item_node.getLocalTransformation().preMultiply(transformation_matrix))

# TODO: We currently do not check for normals and simply recalculate them.
mesh_builder.calculateNormals()
mesh_builder.setFileName(file_name)
node.setMeshData(mesh_builder.build().getTransformed(rotation))
node.setSelectable(True)
result.append(build_item_node)

transformations = root.findall("./3mf:build/3mf:item[@objectid='{0}']".format(entry.get("id")), self._namespaces)
transformation = transformations[0] if transformations else None
if transformation is not None and transformation.get("transform"):
splitted_transformation = transformation.get("transform").split()
## Transformation is saved as:
## M00 M01 M02 0.0
## M10 M11 M12 0.0
## M20 M21 M22 0.0
## M30 M31 M32 1.0
## We switch the row & cols as that is how everyone else uses matrices!
temp_mat = Matrix()
# Rotation & Scale
temp_mat._data[0,0] = splitted_transformation[0]
temp_mat._data[1,0] = splitted_transformation[1]
temp_mat._data[2,0] = splitted_transformation[2]
temp_mat._data[0,1] = splitted_transformation[3]
temp_mat._data[1,1] = splitted_transformation[4]
temp_mat._data[2,1] = splitted_transformation[5]
temp_mat._data[0,2] = splitted_transformation[6]
temp_mat._data[1,2] = splitted_transformation[7]
temp_mat._data[2,2] = splitted_transformation[8]

# Translation
temp_mat._data[0,3] = splitted_transformation[9]
temp_mat._data[1,3] = splitted_transformation[10]
temp_mat._data[2,3] = splitted_transformation[11]

node.setTransformation(temp_mat)

result.addChild(node)

Job.yieldThread()

# If there is more then one object, group them.
if len(objects) > 1:
group_decorator = GroupDecorator()
result.addDecorator(group_decorator)
elif len(objects) == 1:
result = result.getChildren()[0] # Only one object found, return that.
except Exception as e:
Logger.log("e", "exception occured in 3mf reader: %s", e)

try: # Selftest - There might be more functions that should fail
boundingBox = result.getBoundingBox()
boundingBox.isValid()
except:
return None
Logger.log("e", "An exception occurred in 3mf reader: %s", e)

return result

## Create a scale vector based on a unit string.
# The core spec defines the following:
# * micron
# * millimeter (default)
# * centimeter
# * inch
# * foot
# * meter
def _getScaleFromUnit(self, unit):
if unit is None:
unit = "millimeter"
if unit == "micron":
scale = 0.001
elif unit == "millimeter":
scale = 1
elif unit == "centimeter":
scale = 10
elif unit == "inch":
scale = 25.4
elif unit == "foot":
scale = 304.8
elif unit == "meter":
scale = 1000
else:
Logger.log("w", "Unrecognised unit %s used. Assuming mm instead", unit)
scale = 1

return Vector(scale, scale, scale)
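The unit handling above can also be read as a simple lookup table; a compact sketch using the same unit names as the 3MF core spec, falling back to millimetres for missing or unknown units (names hypothetical):

_UNIT_TO_MM = {"micron": 0.001, "millimeter": 1, "centimeter": 10, "inch": 25.4, "foot": 304.8, "meter": 1000}

def scale_to_mm(unit):
    # Unknown or missing units are treated as millimetres, as in _getScaleFromUnit.
    return _UNIT_TO_MM.get(unit or "millimeter", 1)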
@@ -33,7 +33,6 @@ class ChangeLog(Extension, QObject,):
Application.getInstance().engineCreatedSignal.connect(self._onEngineCreated)
Preferences.getInstance().addPreference("general/latest_version_changelog_shown", "2.0.0") #First version of CURA with uranium
self.addMenuItem(catalog.i18nc("@item:inmenu", "Show Changelog"), self.showChangelog)
#self.showChangelog()

def getChangeLogs(self):
if not self._change_logs:

@@ -87,6 +86,13 @@ class ChangeLog(Extension, QObject,):
else:
latest_version_shown = Version(Preferences.getInstance().getValue("general/latest_version_changelog_shown"))

Preferences.getInstance().setValue("general/latest_version_changelog_shown", Application.getInstance().getVersion())

# Do not show the changelog when there is no global container stack
# This implies we are running Cura for the first time.
if not Application.getInstance().getGlobalContainerStack():
return

if self._version > latest_version_shown:
self.showChangelog()

@@ -95,7 +101,6 @@ class ChangeLog(Extension, QObject,):
self.createChangelogWindow()

self._changelog_window.show()
Preferences.getInstance().setValue("general/latest_version_changelog_shown", Application.getInstance().getVersion())

def hideChangelog(self):
if self._changelog_window:
@@ -2,7 +2,7 @@
// Cura is released under the terms of the AGPLv3 or higher.

import QtQuick 2.1
import QtQuick.Controls 1.1
import QtQuick.Controls 1.3
import QtQuick.Layouts 1.1
import QtQuick.Window 2.1

@@ -11,33 +11,31 @@ import UM 1.1 as UM
UM.Dialog
{
id: base
minimumWidth: 400 * Screen.devicePixelRatio
minimumHeight: 300 * Screen.devicePixelRatio
minimumWidth: UM.Theme.getSize("modal_window_minimum").width * 0.75
minimumHeight: UM.Theme.getSize("modal_window_minimum").height * 0.75
width: minimumWidth
height: minimumHeight
title: catalog.i18nc("@label", "Changelog")

ScrollView
TextArea
{
width: parent.width
height: parent.height - 25
Label
{
text: manager.getChangeLogString()
width:base.width - 35
wrapMode: Text.Wrap;
}
anchors.fill: parent
text: manager.getChangeLogString()
readOnly: true;
textFormat: TextEdit.RichText
}
Button
{
UM.I18nCatalog

rightButtons: [
Button
{
id: catalog
name: "cura"
UM.I18nCatalog
{
id: catalog
name: "cura"
}

text: catalog.i18nc("@action:button", "Close")
onClicked: base.hide()
}
anchors.bottom:parent.bottom
text: catalog.i18nc("@action:button", "Close")
onClicked: base.hide()
anchors.horizontalCenter: parent.horizontalCenter
}
]
}
@@ -1,3 +1,125 @@
[2.3.1]
*Layer Height in Profile Selection
Added the layer height to the profile selection menu.

*Bug fixes
Fixed the option to import g-code from related machines as a profile
Fixed a bug where editing material settings has no effect on 3D prints
Fixed an issue with automatic profile importing on Cura 2.1 on Mac OSX
Fixed an inheritance issue for dual extrusion
Fixed an issue with "i" symbol updates
Fixed a freeze that can occur while printing via Wi-Fi

[2.3.0]
*Multi Extrusion Support
Machines with multiple extruders are now supported. Ultimaker 3 printers and Ultimaker Original printers with dual extrusion upgrade kit are currently supported.

*Network Printing for Ultimaker 3
Sending a print to an Ultimaker 3 remotely via the network is now possible. Requires Wi-Fi or LAN to connect to the printer.

*Print Monitoring for Ultimaker 3
You can monitor your print on an Ultimaker 3 with a live camera feed. Requires Wi-Fi or LAN to connect to the printer.

*Material and Print Core Synchronization
Connecting to an Ultimaker 3 now gives you the option to synchronize the materials in Cura with what is loaded in the printer.

*Speed improvements
The first thing you will notice is the speed. STL loading is now 10 to 20 times faster, layer view is significantly faster and slicing speed is slightly improved.

*Improved Position Tool
Place objects precisely where you want them by manually entering the values for the position.

*Custom Machine Support
It’s now much easier to use Cura with custom machines. You can edit the machine settings when you load a new custom machine.

*Improved Grouping
It's now possible to transform objects that are already grouped.
Select an individual item in a group or merged object and edit as usual. Just Ctrl + Click and edit away.

*Enhanced Profile Management
Profile management is improved. You can now easily see and track changes made to your profiles.

*Improved Setting Visibility
Make multiple settings visible at the same time with a checkbox. The Visibility Overview setting indicates why a setting is not shown in the sidebar even if it is enabled.

*Improved time estimation
Time estimations are more accurate. Based on our test time estimations should be within 5% accuracy for Ultimaker printers.

*Optional G-code Machine Prefix
Disable the g-code prefix in Preferences. No more UM2_ on your printer display!

*Print Weight Estimates
Cura now estimates print weight as well as length.

*Automatic Import Configuration
Configurations from older installations of Cura 2.1 are automatically imported into the newest installation.

*Slicing features
*Infill Types
Two new infill types are now introduced: Tetrahedral and Cubic. They change along with the Z-axis for more uniform strength in all directions. There are now seven infill types to choose from.

*Gradual Infill
Gradual infill lets users adjust infill density, based on the distance from the top layers. This offers faster printing and reduced material requirements, whilst maintaining surface quality.

*Set Acceleration and Jerk by Feature
You can now set Jerk and Acceleration by feature-type (infill, walls, top/bottom, etc), for more precision.

*Outer Wall Offset
If your outer wall line width is smaller than your nozzle size, move the nozzle a bit inward when printing the outer wall, to improve surface quality.

*Enhanced Combing
The “No Skin” option allows you to comb over infill only to avoid scars on top surfaces.

*Z Hop
Can’t avoid previously printed parts by horizontal moves? The Z Hop Only Over Printed Parts gives you the ability to Z Hop to avoid collisions for better surface quality.

*Skin and Wall Overlap
The Skin Overlap setting allows you to overlap the skin lines with the walls for better adhesion.

*Adjust Initial Layer Travel Speed
Set the travel speed of the initial layer(s) to reduce risk of extruder pulling the print from the bed.

*Support Interface
It is now possible to print a support bottom as well as a support roof. Support bottoms are placed where the support rests on the model. Printing the support interface with PVA leads to improved surface quality.

*Bug fixes
Deleting grouped objects
Duplicating groups
Bridging
Drag and drop (first Windows run)
Unretraction speeds
Bottom layer in Spiralize mode
Overlap Compensation
Raft retractions
Retractions now occur after each object printed in one-at-a-time mode
Rafts are no longer printed outside of build area
Spiralize no longer limited to the first printed segment only
Line distance is now the actual line distance
Enabling raft doesn’t influence at which height the model is sliced any more
Brim is now always printed just once
Support roofs now only occur just below overhang

*Minor changes
Message display time increased to 30 seconds
Notification if you try to save to a locked SD card
Engine log now included in the application log
Undo and Redo now function with multiple operations
The last used folder is now remembered rather than defaulting to home folder
Import X3D files
Made it possible to add multiple Per Model Settings at once
Bed Level and Checkup procedures for UMO+ can be performed without re-adding machine
Combing applied in more cases and results in better paths
Infill thickness now supports Grid infill also for even multiples of the layer height
Support is no longer removed by unprintable thin parts of the model
Support generated on each appropriate layer
Support no longer goes outside overhang areas
Support no longer removes brim around the object
Brim is now also generated under the support
Draft and Ooze shield get their own brim or raft
Settings shared between skirt and brim now also activate when brim is selected
Compensate overlapping wall parts now also works for inner walls
Bed lowering speed can be adjusted for each layer

[2.1.3]

*Material Profiles
@@ -13,7 +13,7 @@ message Slice
repeated ObjectList object_lists = 1; // The meshgroups to be printed one after another
SettingList global_settings = 2; // The global settings used for the whole print job
repeated Extruder extruders = 3; // The settings sent to each extruder object
repeated SettingExtruder global_inherits_stack = 4; //From which stack the setting would inherit if not defined in a stack.
repeated SettingExtruder limit_to_extruder = 4; // From which stack the setting would inherit if not defined per object
}

message Extruder

@@ -56,6 +56,7 @@ message Polygon {
SupportInfillType = 7;
MoveCombingType = 8;
MoveRetractionType = 9;
SupportInterfaceType = 10;
}
Type type = 1; // Type of move
bytes points = 2; // The points of the polygon, or two points if only a line segment (Currently only line segments are used)
@@ -220,6 +220,9 @@ class CuraEngineBackend(Backend):
#
# \param job The start slice job that was just finished.
def _onStartSliceCompleted(self, job):
if self._error_message:
self._error_message.hide()

# Note that cancelled slice jobs can still call this method.
if self._start_slice_job is job:
self._start_slice_job = None

@@ -227,18 +230,48 @@ class CuraEngineBackend(Backend):
if job.isCancelled() or job.getError() or job.getResult() == StartSliceJob.StartJobResult.Error:
return

if job.getResult() == StartSliceJob.StartJobResult.SettingError:
if job.getResult() == StartSliceJob.StartJobResult.MaterialIncompatible:
if Application.getInstance().getPlatformActivity:
self._error_message = Message(catalog.i18nc("@info:status", "Unable to slice. Please check your setting values for errors."))
self._error_message = Message(catalog.i18nc("@info:status",
    "The selected material is incompatible with the selected machine or configuration."))
self._error_message.show()
self.backendStateChange.emit(BackendState.Error)
else:
self.backendStateChange.emit(BackendState.NotStarted)
return

if job.getResult() == StartSliceJob.StartJobResult.SettingError:
if Application.getInstance().getPlatformActivity:
extruders = list(ExtruderManager.getInstance().getMachineExtruders(self._global_container_stack.getId()))
error_keys = []
for extruder in extruders:
error_keys.extend(extruder.getErrorKeys())
if not extruders:
error_keys = self._global_container_stack.getErrorKeys()
error_labels = set()
definition_container = self._global_container_stack.getBottom()
for key in error_keys:
error_labels.add(definition_container.findDefinitions(key = key)[0].label)

error_labels = ", ".join(error_labels)
self._error_message = Message(catalog.i18nc("@info:status", "Unable to slice with the current settings. The following settings have errors: {0}".format(error_labels)))
self._error_message.show()
self.backendStateChange.emit(BackendState.Error)
else:
self.backendStateChange.emit(BackendState.NotStarted)
return

if job.getResult() == StartSliceJob.StartJobResult.BuildPlateError:
if Application.getInstance().getPlatformActivity:
self._error_message = Message(catalog.i18nc("@info:status", "Unable to slice because the prime tower or prime position(s) are invalid."))
self._error_message.show()
self.backendStateChange.emit(BackendState.Error)
else:
self.backendStateChange.emit(BackendState.NotStarted)

if job.getResult() == StartSliceJob.StartJobResult.NothingToSlice:
if Application.getInstance().getPlatformActivity:
self._error_message = Message(catalog.i18nc("@info:status", "Unable to slice. No suitable models found."))
self._error_message = Message(catalog.i18nc("@info:status", "Nothing to slice because none of the models fit the build volume. Please scale or rotate models to fit."))
self._error_message.show()
self.backendStateChange.emit(BackendState.Error)
else:

@@ -286,7 +319,7 @@ class CuraEngineBackend(Backend):
self._terminate()

if error.getErrorCode() not in [Arcus.ErrorCode.BindFailedError, Arcus.ErrorCode.ConnectionResetError, Arcus.ErrorCode.Debug]:
Logger.log("e", "A socket error caused the connection to be reset")
Logger.log("w", "A socket error caused the connection to be reset")

## A setting has changed, so check if we must reslice.
#

@@ -326,6 +359,7 @@ class CuraEngineBackend(Backend):
Logger.log("d", "Slicing took %s seconds", time() - self._slice_start_time )
if self._layer_view_active and (self._process_layers_job is None or not self._process_layers_job.isRunning()):
self._process_layers_job = ProcessSlicedLayersJob.ProcessSlicedLayersJob(self._stored_optimized_layer_data)
self._process_layers_job.finished.connect(self._onProcessLayersFinished)
self._process_layers_job.start()
self._stored_optimized_layer_data = []

@@ -401,6 +435,7 @@ class CuraEngineBackend(Backend):
# if we are slicing, there is no need to re-calculate the data as it will be invalid in a moment.
if self._stored_optimized_layer_data and not self._slicing:
self._process_layers_job = ProcessSlicedLayersJob.ProcessSlicedLayersJob(self._stored_optimized_layer_data)
self._process_layers_job.finished.connect(self._onProcessLayersFinished)
self._process_layers_job.start()
self._stored_optimized_layer_data = []
else:

@@ -453,3 +488,5 @@ class CuraEngineBackend(Backend):
if self._active_extruder_stack:
self._active_extruder_stack.containersChanged.connect(self._onChanged)

def _onProcessLayersFinished(self, job):
self._process_layers_job = None
@@ -1,6 +1,8 @@
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

import gc

from UM.Job import Job
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
from UM.Scene.SceneNode import SceneNode

@@ -64,6 +66,12 @@ class ProcessSlicedLayersJob(Job):
self._progress.hide()
return

# Force garbage collection.
# For some reason, Python has a tendency to keep the layer data
# in memory longer than needed. Forcing the GC to run here makes
# sure any old layer data is really cleaned up before adding new.
gc.collect()

mesh = MeshData()
layer_data = LayerDataBuilder.LayerDataBuilder()
layer_count = len(self._layers)
@@ -24,6 +24,8 @@ class StartJobResult(IntEnum):
Error = 2
SettingError = 3
NothingToSlice = 4
MaterialIncompatible = 5
BuildPlateError = 6

## Formatter class that handles token expansion in start/end gcod

@@ -74,10 +76,21 @@ class StartSliceJob(Job):
return

# Don't slice if there is a setting with an error value.
if not Application.getInstance().getMachineManager().isActiveStackValid:
if Application.getInstance().getMachineManager().stacksHaveErrors:
self.setResult(StartJobResult.SettingError)
return

if Application.getInstance().getBuildVolume().hasErrors():
self.setResult(StartJobResult.BuildPlateError)
return

for extruder_stack in cura.Settings.ExtruderManager.getInstance().getMachineExtruders(stack.getId()):
material = extruder_stack.findContainer({"type": "material"})
if material:
if material.getMetaDataEntry("compatible") == False:
self.setResult(StartJobResult.MaterialIncompatible)
return

# Don't slice if there is a per object setting with an error value.
for node in DepthFirstIterator(self._scene.getRoot()):
if type(node) is not SceneNode or not node.isSelectable():
@@ -148,7 +161,18 @@ class StartSliceJob(Job):

obj = group_message.addRepeatedMessage("objects")
obj.id = id(object)
verts = numpy.array(mesh_data.getVertices())
verts = mesh_data.getVertices()
indices = mesh_data.getIndices()
if indices is not None:
#TODO: This is a very slow way of doing it! It also locks up the GUI.
flat_vert_list = []
for face in indices:
for vert_index in face:
flat_vert_list.append(verts[vert_index])
Job.yieldThread()
verts = numpy.array(flat_vert_list)
else:
verts = numpy.array(verts)

# Convert from Y up axes to Z up axes. Equals a 90 degree rotation.
verts[:, [1, 2]] = verts[:, [2, 1]]
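The TODO above notes that flattening the indexed vertices in a Python loop is slow. A hedged sketch of a vectorized alternative using numpy fancy indexing, assuming indices is an (n, 3) integer array and verts an (m, 3) float array:

import numpy

flat_verts = numpy.asarray(verts)[numpy.asarray(indices).flatten()]
# Same Y-up to Z-up conversion as in the diff: swap the Y and Z columns.
flat_verts[:, [1, 2]] = flat_verts[:, [2, 1]]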
@@ -186,6 +210,9 @@ class StartSliceJob(Job):
material_instance_container = stack.findContainer({"type": "material"})

for key in stack.getAllKeys():
# Do not send settings that are not settable_per_extruder.
if stack.getProperty(key, "settable_per_extruder") == False:
continue
setting = message.getMessage("settings").addRepeatedMessage("settings")
setting.name = key
if key == "material_guid" and material_instance_container:
@@ -206,10 +233,18 @@ class StartSliceJob(Job):
# Use resolvement value if available, or take the value
resolved_value = stack.getProperty(key, "resolve")
if resolved_value is not None:
settings[key] = resolved_value
# There is a resolvement value. Check if we need to use it.
user_container = stack.findContainer({"type": "user"})
quality_changes_container = stack.findContainer({"type": "quality_changes"})
if user_container.hasProperty(key,"value") or quality_changes_container.hasProperty(key,"value"):
# Normal case
settings[key] = stack.getProperty(key, "value")
else:
settings[key] = resolved_value
else:
# Normal case
settings[key] = stack.getProperty(key, "value")
Job.yieldThread()

start_gcode = settings["machine_start_gcode"]
settings["material_bed_temp_prepend"] = "{material_bed_temperature}" not in start_gcode #Pre-compute material material_bed_temp_prepend and material_print_temp_prepend
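In short, a "resolve" value only wins when neither the user container nor the quality_changes container overrides the key. A standalone sketch of that decision (helper name hypothetical):

def effective_value(stack, key):
    resolved = stack.getProperty(key, "resolve")
    if resolved is None:
        return stack.getProperty(key, "value")  # normal case
    user_container = stack.findContainer({"type": "user"})
    quality_changes_container = stack.findContainer({"type": "quality_changes"})
    if user_container.hasProperty(key, "value") or quality_changes_container.hasProperty(key, "value"):
        return stack.getProperty(key, "value")  # an explicit override beats the resolved value
    return resolved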
@@ -222,22 +257,24 @@ class StartSliceJob(Job):
setting_message.value = self._expandGcodeTokens(key, value, settings)
else:
setting_message.value = str(value).encode("utf-8")
Job.yieldThread()

## Sends for some settings which extruder they should fallback to if not
# set.
#
# This is only set for settings that have the global_inherits_stack
# This is only set for settings that have the limit_to_extruder
# property.
#
# \param stack The global stack with all settings, from which to read the
# global_inherits_stack property.
# limit_to_extruder property.
def _buildGlobalInheritsStackMessage(self, stack):
for key in stack.getAllKeys():
extruder = int(round(float(stack.getProperty(key, "global_inherits_stack"))))
extruder = int(round(float(stack.getProperty(key, "limit_to_extruder"))))
if extruder >= 0: #Set to a specific extruder.
setting_extruder = self._slice_message.addRepeatedMessage("global_inherits_stack")
setting_extruder = self._slice_message.addRepeatedMessage("limit_to_extruder")
setting_extruder.name = key
setting_extruder.extruder = extruder
Job.yieldThread()

## Check if a node has per object settings and ensure that they are set correctly in the message
# \param node \type{SceneNode} Node to check.
@@ -1,8 +1,8 @@
# Copyright (c) 2015 Ultimaker B.V.
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import configparser

import os.path

from UM import PluginRegistry
from UM.Logger import Logger
from UM.Settings.InstanceContainer import InstanceContainer # The new profile to make.
from cura.ProfileReader import ProfileReader

@@ -26,19 +26,79 @@ class CuraProfileReader(ProfileReader):
# not be read or didn't contain a valid profile, \code None \endcode is
# returned.
def read(self, file_name):
archive = zipfile.ZipFile(file_name, "r")
results = []
for profile_id in archive.namelist():
# Create an empty profile.
profile = InstanceContainer(profile_id)
profile.addMetaDataEntry("type", "quality_changes")
serialized = ""
with archive.open(profile_id) as f:
serialized = f.read()
try:
profile.deserialize(serialized.decode("utf-8") )
except Exception as e: # Parsing error. This is not a (valid) Cura profile then.
Logger.log("e", "Error while trying to parse profile: %s", str(e))
continue
results.append(profile)
return results
try:
with zipfile.ZipFile(file_name, "r") as archive:
results = []
for profile_id in archive.namelist():
with archive.open(profile_id) as f:
serialized = f.read()
profile = self._loadProfile(serialized.decode("utf-8"), profile_id)
if profile is not None:
results.append(profile)
return results

except zipfile.BadZipFile:
# It must be an older profile from Cura 2.1.
with open(file_name, encoding="utf-8") as fhandle:
serialized = fhandle.read()
return [self._loadProfile(serialized, profile_id) for serialized, profile_id in self._upgradeProfile(serialized, file_name)]

## Convert a profile from an old Cura to this Cura if needed.
#
# \param serialized \type{str} The profile data to convert in the serialized on-disk format.
# \param profile_id \type{str} The name of the profile.
# \return \type{List[Tuple[str,str]]} List of serialized profile strings and matching profile names.
def _upgradeProfile(self, serialized, profile_id):
parser = configparser.ConfigParser(interpolation=None)
parser.read_string(serialized)

if not "general" in parser:
Logger.log("w", "Missing required section 'general'.")
return []
if not "version" in parser["general"]:
Logger.log("w", "Missing required 'version' property")
return []

version = int(parser["general"]["version"])
if InstanceContainer.Version != version:
name = parser["general"]["name"]
return self._upgradeProfileVersion(serialized, name, version)
else:
return [(serialized, profile_id)]

## Load a profile from a serialized string.
#
# \param serialized \type{str} The profile data to read.
# \param profile_id \type{str} The name of the profile.
# \return \type{InstanceContainer|None}
def _loadProfile(self, serialized, profile_id):
# Create an empty profile.
profile = InstanceContainer(profile_id)
profile.addMetaDataEntry("type", "quality_changes")
try:
profile.deserialize(serialized)
except Exception as e: # Parsing error. This is not a (valid) Cura profile then.
Logger.log("e", "Error while trying to parse profile: %s", str(e))
return None
return profile

## Upgrade a serialized profile to the current profile format.
#
# \param serialized \type{str} The profile data to convert.
# \param profile_id \type{str} The name of the profile.
# \param source_version \type{int} The profile version of 'serialized'.
# \return \type{List[Tuple[str,str]]} List of serialized profile strings and matching profile names.
def _upgradeProfileVersion(self, serialized, profile_id, source_version):
converter_plugins = PluginRegistry.getInstance().getAllMetaData(filter={"version_upgrade": {} }, active_only=True)

source_format = ("profile", source_version)
profile_convert_funcs = [plugin["version_upgrade"][source_format][2] for plugin in converter_plugins
    if source_format in plugin["version_upgrade"] and plugin["version_upgrade"][source_format][1] == InstanceContainer.Version]

if not profile_convert_funcs:
return []

filenames, outputs = profile_convert_funcs[0](serialized, profile_id)
if filenames is None and outputs is None:
return []
return list(zip(outputs, filenames))
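A hedged usage sketch of the new read() path: it accepts either a zipped collection of quality_changes containers or a bare Cura 2.1 profile, upgrading the latter through the registered version_upgrade plugins (file name hypothetical):

reader = CuraProfileReader()
profiles = reader.read("example.curaprofile")
for profile in profiles:
    if profile is not None:  # entries that failed to parse or upgrade come back as None
        print(profile.getName())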
@@ -70,10 +70,19 @@ class GCodeProfileReader(ProfileReader):

json_data = json.loads(serialized)

profile_strings = [json_data["global_quality"]]
profile_strings.extend(json_data.get("extruder_quality", []))
profiles = []
global_profile = readQualityProfileFromString(json_data["global_quality"])

return [readQualityProfileFromString(profile_string) for profile_string in profile_strings]
# This is a fix for profiles created with 2.3.0. For some reason it added the "extruder" property to the
# global profile.
# The fix is simple and safe, as a global profile should never have the extruder entry.
if global_profile.getMetaDataEntry("extruder", None) is not None:
global_profile.setMetaDataEntry("extruder", None)
profiles.append(global_profile)

for profile_string in json_data.get("extruder_quality", []):
profiles.append(readQualityProfileFromString(profile_string))
return profiles

## Unescape a string which has been escaped for use in a gcode comment.
#
@@ -13,6 +13,7 @@ from UM.Settings.InstanceContainer import InstanceContainer

import re #For escaping characters in the settings.
import json
import copy

## Writes g-code to a file.
#

@@ -46,7 +47,17 @@ class GCodeWriter(MeshWriter):
def __init__(self):
super().__init__()

def write(self, stream, node, mode = MeshWriter.OutputMode.TextMode):
## Writes the g-code for the entire scene to a stream.
#
# Note that even though the function accepts a collection of nodes, the
# entire scene is always written to the file since it is not possible to
# separate the g-code for just specific nodes.
#
# \param stream The stream to write the g-code to.
# \param nodes This is ignored.
# \param mode Additional information on how to format the g-code in the
# file. This must always be text mode.
def write(self, stream, nodes, mode = MeshWriter.OutputMode.TextMode):
if mode != MeshWriter.OutputMode.TextMode:
Logger.log("e", "GCode Writer does not support non-text mode.")
return False

@@ -66,8 +77,11 @@ class GCodeWriter(MeshWriter):
## Create a new container with container 2 as base and container 1 written over it.
def _createFlattenedContainerInstance(self, instance_container1, instance_container2):
flat_container = InstanceContainer(instance_container2.getName())
flat_container.setDefinition(instance_container2.getDefinition())
flat_container.setMetaData(instance_container2.getMetaData())
if instance_container1.getDefinition():
flat_container.setDefinition(instance_container1.getDefinition())
else:
flat_container.setDefinition(instance_container2.getDefinition())
flat_container.setMetaData(copy.deepcopy(instance_container2.getMetaData()))

for key in instance_container2.getAllKeys():
flat_container.setProperty(key, "value", instance_container2.getProperty(key, "value"))

@@ -89,23 +103,34 @@ class GCodeWriter(MeshWriter):
prefix = ";SETTING_" + str(GCodeWriter.version) + " " # The prefix to put before each line.
prefix_length = len(prefix)

container_with_profile = stack.findContainer({"type": "quality"})
container_with_profile = stack.findContainer({"type": "quality_changes"})
if not container_with_profile:
Logger.log("e", "No valid quality profile found, not writing settings to GCode!")
return ""

flat_global_container = self._createFlattenedContainerInstance(stack.getTop(),container_with_profile)
flat_global_container = self._createFlattenedContainerInstance(stack.getTop(), container_with_profile)

# Ensure that quality_type is set. (Can happen if we have empty quality changes).
if flat_global_container.getMetaDataEntry("quality_type", None) is None:
flat_global_container.addMetaDataEntry("quality_type", stack.findContainer({"type": "quality"}).getMetaDataEntry("quality_type", "normal"))

serialized = flat_global_container.serialize()
data = {"global_quality": serialized}

for extruder in ExtruderManager.getInstance().getMachineExtruders(stack.getId()):
extruder_quality = extruder.findContainer({"type": "quality"})
for extruder in sorted(ExtruderManager.getInstance().getMachineExtruders(stack.getId()), key = lambda k: k.getMetaDataEntry("position")):
extruder_quality = extruder.findContainer({"type": "quality_changes"})
if not extruder_quality:
Logger.log("w", "No extruder quality profile found, not writing quality for extruder %s to file!", extruder.getId())
continue

flat_extruder_quality = self._createFlattenedContainerInstance(extruder.getTop(), extruder_quality)

# Ensure that extruder is set. (Can happen if we have empty quality changes).
if flat_extruder_quality.getMetaDataEntry("extruder", None) is None:
flat_extruder_quality.addMetaDataEntry("extruder", extruder.getBottom().getId())

# Ensure that quality_type is set. (Can happen if we have empty quality changes).
if flat_extruder_quality.getMetaDataEntry("quality_type", None) is None:
flat_extruder_quality.addMetaDataEntry("quality_type", extruder.findContainer({"type": "quality"}).getMetaDataEntry("quality_type", "normal"))
extruder_serialized = flat_extruder_quality.serialize()
data.setdefault("extruder_quality", []).append(extruder_serialized)
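The serialized quality data collected above ends up embedded in the g-code as ";SETTING_<version> " comment lines. A hedged sketch of that chunking (the exact escaping and line width used by GCodeWriter may differ):

import json

def settings_to_gcode_comments(data, version=3, width=80):
    # Serialize the profile dict, escape backslashes and newlines, then split
    # it into comment lines that each fit within `width` characters.
    prefix = ";SETTING_%s " % version
    payload = json.dumps(data).replace("\\", "\\\\").replace("\n", "\\n")
    chunk = width - len(prefix)
    return "".join(prefix + payload[i:i + chunk] + "\n" for i in range(0, len(payload), chunk))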
plugins/LayerView/LayerPass.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
from UM.Resources import Resources
from UM.Scene.SceneNode import SceneNode
from UM.Scene.ToolHandle import ToolHandle
from UM.Application import Application
from UM.PluginRegistry import PluginRegistry

from UM.View.RenderPass import RenderPass
from UM.View.RenderBatch import RenderBatch
from UM.View.GL.OpenGL import OpenGL

from cura.Settings.ExtruderManager import ExtruderManager

import os.path

## RenderPass used to display g-code paths.
class LayerPass(RenderPass):
def __init__(self, width, height):
super().__init__("layerview", width, height)

self._layer_shader = None
self._tool_handle_shader = None
self._gl = OpenGL.getInstance().getBindingsObject()
self._scene = Application.getInstance().getController().getScene()
self._extruder_manager = ExtruderManager.getInstance()

self._layer_view = None

def setLayerView(self, layerview):
self._layerview = layerview

def render(self):
if not self._layer_shader:
self._layer_shader = OpenGL.getInstance().createShaderProgram(os.path.join(PluginRegistry.getInstance().getPluginPath("LayerView"), "layers.shader"))
# Use extruder 0 if the extruder manager reports extruder index -1 (for single extrusion printers)
self._layer_shader.setUniformValue("u_active_extruder", float(max(0, self._extruder_manager.activeExtruderIndex)))
if not self._tool_handle_shader:
self._tool_handle_shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "toolhandle.shader"))

self.bind()

tool_handle_batch = RenderBatch(self._tool_handle_shader, type = RenderBatch.RenderType.Overlay)

for node in DepthFirstIterator(self._scene.getRoot()):
if isinstance(node, ToolHandle):
tool_handle_batch.addItem(node.getWorldTransformation(), mesh = node.getSolidMesh())

elif isinstance(node, SceneNode) and node.getMeshData() and node.isVisible():
layer_data = node.callDecoration("getLayerData")
if not layer_data:
continue

# Render all layers below a certain number as line mesh instead of vertices.
if self._layerview._current_layer_num - self._layerview._solid_layers > -1 and not self._layerview._only_show_top_layers:
start = 0
end = 0
element_counts = layer_data.getElementCounts()
for layer, counts in element_counts.items():
if layer + self._layerview._solid_layers > self._layerview._current_layer_num:
break
end += counts

# This uses glDrawRangeElements internally to only draw a certain range of lines.
batch = RenderBatch(self._layer_shader, type = RenderBatch.RenderType.Solid, mode = RenderBatch.RenderMode.Lines, range = (start, end))
batch.addItem(node.getWorldTransformation(), layer_data)
batch.render(self._scene.getActiveCamera())

# Create a new batch that is not range-limited
batch = RenderBatch(self._layer_shader, type = RenderBatch.RenderType.Solid)

if self._layerview._current_layer_mesh:
batch.addItem(node.getWorldTransformation(), self._layerview._current_layer_mesh)

if self._layerview._current_layer_jumps:
batch.addItem(node.getWorldTransformation(), self._layerview._current_layer_jumps)

if len(batch.items) > 0:
batch.render(self._scene.getActiveCamera())

# Render toolhandles on top of the layerview
if len(tool_handle_batch.items) > 0:
tool_handle_batch.render(self._scene.getActiveCamera())

self.release()
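The range-limited batch above only draws the elements that belong to layers below the top "solid layers" window. A small sketch of how that (start, end) range is derived from the per-layer element counts (function name hypothetical):

def layer_element_range(element_counts, current_layer, solid_layers):
    # Accumulate the counts of all layers that stay below the solid-layer
    # window; everything from index 0 up to `end` is then drawn as plain lines.
    end = 0
    for layer, count in sorted(element_counts.items()):
        if layer + solid_layers > current_layer:
            break
        end += count
    return 0, end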
@ -1,6 +1,7 @@
|
|||
# Copyright (c) 2015 Ultimaker B.V.
|
||||
# Cura is released under the terms of the AGPLv3 or higher.
|
||||
|
||||
from UM.PluginRegistry import PluginRegistry
|
||||
from UM.View.View import View
|
||||
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
|
||||
from UM.Resources import Resources
|
||||
|
@ -12,9 +13,11 @@ from UM.Mesh.MeshBuilder import MeshBuilder
|
|||
from UM.Job import Job
|
||||
from UM.Preferences import Preferences
|
||||
from UM.Logger import Logger
|
||||
|
||||
from UM.Scene.SceneNode import SceneNode
|
||||
from UM.View.RenderBatch import RenderBatch
|
||||
from UM.View.GL.OpenGL import OpenGL
|
||||
from UM.Message import Message
|
||||
from UM.Application import Application
|
||||
|
||||
from cura.ConvexHullNode import ConvexHullNode
|
||||
|
||||
|
@ -26,18 +29,16 @@ from . import LayerViewProxy
|
|||
from UM.i18n import i18nCatalog
|
||||
catalog = i18nCatalog("cura")
|
||||
|
||||
from . import LayerPass
|
||||
|
||||
import numpy
|
||||
import os.path
|
||||
|
||||
## View used to display g-code paths.
|
||||
class LayerView(View):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._shader = None
|
||||
self._selection_shader = None
|
||||
self._num_layers = 0
|
||||
self._layer_percentage = 0 # what percentage of layers need to be shown (Slider gives value between 0 - 100)
|
||||
self._proxy = LayerViewProxy.LayerViewProxy()
|
||||
self._controller.getScene().getRoot().childrenChanged.connect(self._onSceneChanged)
|
||||
|
||||
self._max_layers = 0
|
||||
self._current_layer_num = 0
|
||||
self._current_layer_mesh = None
|
||||
|
@ -46,17 +47,40 @@ class LayerView(View):
|
|||
self._activity = False
|
||||
self._old_max_layers = 0
|
||||
|
||||
self._busy = False
|
||||
|
||||
self._ghost_shader = None
|
||||
self._layer_pass = None
|
||||
self._composite_pass = None
|
||||
self._old_layer_bindings = None
|
||||
self._layerview_composite_shader = None
|
||||
self._old_composite_shader = None
|
||||
|
||||
self._global_container_stack = None
|
||||
self._proxy = LayerViewProxy.LayerViewProxy()
|
||||
self._controller.getScene().getRoot().childrenChanged.connect(self._onSceneChanged)
|
||||
|
||||
Preferences.getInstance().addPreference("view/top_layer_count", 5)
|
||||
Preferences.getInstance().addPreference("view/only_show_top_layers", False)
|
||||
Preferences.getInstance().preferenceChanged.connect(self._onPreferencesChanged)
|
||||
|
||||
self._solid_layers = int(Preferences.getInstance().getValue("view/top_layer_count"))
|
||||
self._only_show_top_layers = bool(Preferences.getInstance().getValue("view/only_show_top_layers"))
|
||||
self._busy = False
|
||||
|
||||
self._wireprint_warning_message = Message(catalog.i18nc("@info:status", "Cura does not accurately display layers when Wire Printing is enabled"))
|
||||
|
||||
def getActivity(self):
|
||||
return self._activity
|
||||
|
||||
def getLayerPass(self):
|
||||
if not self._layer_pass:
|
||||
# Currently the RenderPass constructor requires a size > 0
|
||||
# This should be fixed in RenderPass's constructor.
|
||||
self._layer_pass = LayerPass.LayerPass(1, 1)
|
||||
self._layer_pass.setLayerView(self)
|
||||
self.getRenderer().addRenderPass(self._layer_pass)
|
||||
return self._layer_pass
|
||||
|
||||
def getCurrentLayer(self):
|
||||
return self._current_layer_num
|
||||
|
||||
|
@ -84,9 +108,9 @@ class LayerView(View):
|
|||
scene = self.getController().getScene()
|
||||
renderer = self.getRenderer()
|
||||
|
||||
if not self._selection_shader:
|
||||
self._selection_shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "color.shader"))
|
||||
self._selection_shader.setUniformValue("u_color", Color(32, 32, 32, 128))
|
||||
if not self._ghost_shader:
|
||||
self._ghost_shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "color.shader"))
|
||||
self._ghost_shader.setUniformValue("u_color", Color(32, 32, 32, 96))
|
||||
|
||||
for node in DepthFirstIterator(scene.getRoot()):
|
||||
# We do not want to render ConvexHullNode as it conflicts with the bottom layers.
|
||||
|
@ -96,30 +120,7 @@ class LayerView(View):
|
|||
|
||||
if not node.render(renderer):
|
||||
if node.getMeshData() and node.isVisible():
|
||||
if Selection.isSelected(node):
|
||||
renderer.queueNode(node, transparent = True, shader = self._selection_shader)
|
||||
layer_data = node.callDecoration("getLayerData")
|
||||
if not layer_data:
|
||||
continue
|
||||
|
||||
# Render all layers below a certain number as line mesh instead of vertices.
|
||||
if self._current_layer_num - self._solid_layers > -1 and not self._only_show_top_layers:
|
||||
start = 0
|
||||
end = 0
|
||||
element_counts = layer_data.getElementCounts()
|
||||
for layer, counts in element_counts.items():
|
||||
if layer + self._solid_layers > self._current_layer_num:
|
||||
break
|
||||
end += counts
|
||||
|
||||
# This uses glDrawRangeElements internally to only draw a certain range of lines.
|
||||
renderer.queueNode(node, mesh = layer_data, mode = RenderBatch.RenderMode.Lines, range = (start, end))
|
||||
|
||||
if self._current_layer_mesh:
|
||||
renderer.queueNode(node, mesh = self._current_layer_mesh)
|
||||
|
||||
if self._current_layer_jumps:
|
||||
renderer.queueNode(node, mesh = self._current_layer_jumps)
|
||||
renderer.queueNode(node, transparent = True, shader = self._ghost_shader)
|
||||
|
||||
def setLayer(self, value):
|
||||
if self._current_layer_num != value:
|
||||
|
@ -153,7 +154,7 @@ class LayerView(View):
|
|||
|
||||
# The qt slider has a bit of weird behavior that if the maxvalue needs to be changed first
|
||||
# if it's the largest value. If we don't do this, we can have a slider block outside of the
|
||||
# slider.
|
||||
# slider.
|
||||
if new_max_layers > self._current_layer_num:
|
||||
self.maxLayersChanged.emit()
|
||||
self.setLayer(int(self._max_layers))
|
||||
|
@@ -166,7 +167,7 @@ class LayerView(View):
    currentLayerNumChanged = Signal()

    ##  Hackish way to ensure the proxy is already created, which ensures that the layerview.qml is already created
    #   as this caused some issues.
    def getProxy(self, engine, script_engine):
        return self._proxy
@@ -184,6 +185,50 @@ class LayerView(View):
            self.setLayer(self._current_layer_num - 1)
            return True

        if event.type == Event.ViewActivateEvent:
            # Make sure the LayerPass is created
            self.getLayerPass()

            Application.getInstance().globalContainerStackChanged.connect(self._onGlobalStackChanged)
            self._onGlobalStackChanged()

            if not self._layerview_composite_shader:
                self._layerview_composite_shader = OpenGL.getInstance().createShaderProgram(os.path.join(PluginRegistry.getInstance().getPluginPath("LayerView"), "layerview_composite.shader"))

            if not self._composite_pass:
                self._composite_pass = self.getRenderer().getRenderPass("composite")

            self._old_layer_bindings = self._composite_pass.getLayerBindings()[:]  # make a copy so we can restore to it later
            self._composite_pass.getLayerBindings().append("layerview")
            self._old_composite_shader = self._composite_pass.getCompositeShader()
            self._composite_pass.setCompositeShader(self._layerview_composite_shader)

        elif event.type == Event.ViewDeactivateEvent:
            self._wireprint_warning_message.hide()
            Application.getInstance().globalContainerStackChanged.disconnect(self._onGlobalStackChanged)
            if self._global_container_stack:
                self._global_container_stack.propertyChanged.disconnect(self._onPropertyChanged)

            self._composite_pass.setLayerBindings(self._old_layer_bindings)
            self._composite_pass.setCompositeShader(self._old_composite_shader)

    def _onGlobalStackChanged(self):
        if self._global_container_stack:
            self._global_container_stack.propertyChanged.disconnect(self._onPropertyChanged)
        self._global_container_stack = Application.getInstance().getGlobalContainerStack()
        if self._global_container_stack:
            self._global_container_stack.propertyChanged.connect(self._onPropertyChanged)
            self._onPropertyChanged("wireframe_enabled", "value")
        else:
            self._wireprint_warning_message.hide()

    def _onPropertyChanged(self, key, property_name):
        if key == "wireframe_enabled" and property_name == "value":
            if self._global_container_stack.getProperty("wireframe_enabled", "value"):
                self._wireprint_warning_message.show()
            else:
                self._wireprint_warning_message.hide()

    def _startUpdateTopLayers(self):
        if self._top_layers_job:
            self._top_layers_job.finished.disconnect(self._updateCurrentLayerMesh)
@@ -278,3 +323,4 @@ class _CreateTopLayersJob(Job):
    def cancel(self):
        self._cancel = True
        super().cancel()

plugins/LayerView/layers.shader (new file, 35 lines)
@@ -0,0 +1,35 @@
[shaders]
vertex =
    uniform highp mat4 u_modelViewProjectionMatrix;
    uniform lowp float u_active_extruder;
    uniform lowp float u_shade_factor;

    attribute highp vec4 a_vertex;
    attribute lowp vec4 a_color;
    varying lowp vec4 v_color;
    void main()
    {
        gl_Position = u_modelViewProjectionMatrix * a_vertex;
        // shade the color depending on the extruder index stored in the alpha component of the color
        v_color = (a_color.a == u_active_extruder) ? a_color : a_color * u_shade_factor;
        v_color.a = 1.0;
    }

fragment =
    varying lowp vec4 v_color;

    void main()
    {
        gl_FragColor = v_color;
    }

[defaults]
u_active_extruder = 0.0
u_shade_factor = 0.60

[bindings]
u_modelViewProjectionMatrix = model_view_projection_matrix

[attributes]
a_vertex = vertex
a_color = color
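Editor's note: the shading rule in this vertex shader is simple enough to check on the CPU side. Below is a minimal Python sketch of the same rule, assuming colors are plain RGBA tuples with the extruder index stored in the alpha channel, as the shader comment describes; the helper name is illustrative, not part of the plugin.

```python
def shade_for_extruder(color, active_extruder, shade_factor = 0.60):
    """Mimic layers.shader: dim colors of non-active extruders, then force alpha to 1.0."""
    r, g, b, extruder_index = color  # the alpha component carries the extruder index
    if extruder_index == active_extruder:
        r_out, g_out, b_out = r, g, b
    else:
        r_out, g_out, b_out = r * shade_factor, g * shade_factor, b * shade_factor
    return (r_out, g_out, b_out, 1.0)

# Example: a line belonging to extruder 1 while extruder 0 is active gets dimmed.
print(shade_for_extruder((1.0, 0.5, 0.2, 1.0), active_extruder = 0.0))  # approx. (0.6, 0.3, 0.12, 1.0)
```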
plugins/LayerView/layerview_composite.shader (new file, 78 lines)
@@ -0,0 +1,78 @@
[shaders]
vertex =
    uniform highp mat4 u_modelViewProjectionMatrix;
    attribute highp vec4 a_vertex;
    attribute highp vec2 a_uvs;

    varying highp vec2 v_uvs;

    void main()
    {
        gl_Position = u_modelViewProjectionMatrix * a_vertex;
        v_uvs = a_uvs;
    }

fragment =
    uniform sampler2D u_layer0;
    uniform sampler2D u_layer1;
    uniform sampler2D u_layer2;

    uniform vec2 u_offset[9];

    uniform vec4 u_background_color;
    uniform float u_outline_strength;
    uniform vec4 u_outline_color;

    varying vec2 v_uvs;

    float kernel[9];

    const vec3 x_axis = vec3(1.0, 0.0, 0.0);
    const vec3 y_axis = vec3(0.0, 1.0, 0.0);
    const vec3 z_axis = vec3(0.0, 0.0, 1.0);

    void main()
    {
        kernel[0] = 0.0; kernel[1] = 1.0; kernel[2] = 0.0;
        kernel[3] = 1.0; kernel[4] = -4.0; kernel[5] = 1.0;
        kernel[6] = 0.0; kernel[7] = 1.0; kernel[8] = 0.0;

        vec4 result = u_background_color;

        vec4 main_layer = texture2D(u_layer0, v_uvs);
        vec4 selection_layer = texture2D(u_layer1, v_uvs);
        vec4 layerview_layer = texture2D(u_layer2, v_uvs);

        result = main_layer * main_layer.a + result * (1.0 - main_layer.a);
        result = layerview_layer * layerview_layer.a + result * (1.0 - layerview_layer.a);

        vec4 sum = vec4(0.0);
        for (int i = 0; i < 9; i++)
        {
            vec4 color = vec4(texture2D(u_layer1, v_uvs.xy + u_offset[i]).a);
            sum += color * (kernel[i] / u_outline_strength);
        }

        if((selection_layer.rgb == x_axis || selection_layer.rgb == y_axis || selection_layer.rgb == z_axis))
        {
            gl_FragColor = result;
        }
        else
        {
            gl_FragColor = mix(result, u_outline_color, abs(sum.a));
        }
    }

[defaults]
u_layer0 = 0
u_layer1 = 1
u_layer2 = 2
u_background_color = [0.965, 0.965, 0.965, 1.0]
u_outline_strength = 1.0
u_outline_color = [0.05, 0.66, 0.89, 1.0]

[bindings]

[attributes]
a_vertex = vertex
a_uvs = uv
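Editor's note: for reference, here is a small Python sketch of the outline pass in the fragment shader above. The 3x3 Laplacian kernel is applied to the alpha of the selection layer; flat regions cancel out to zero while edges produce a non-zero response, which the shader then mixes with u_outline_color. The mask and helper below are illustrative only (the real shader samples u_layer1 with the u_offset array).

```python
# Laplacian kernel from layerview_composite.shader, row-major.
KERNEL = [0.0,  1.0, 0.0,
          1.0, -4.0, 1.0,
          0.0,  1.0, 0.0]

def outline_response(alpha, x, y, outline_strength = 1.0):
    """Return |sum(kernel * alpha)| around (x, y); 0 means 'no outline here'."""
    offsets = [(dx, dy) for dy in (-1, 0, 1) for dx in (-1, 0, 1)]
    total = 0.0
    for k, (dx, dy) in zip(KERNEL, offsets):
        total += k * alpha[y + dy][x + dx] / outline_strength
    return abs(total)

# A tiny alpha mask with a hard edge between selected (1.0) and empty (0.0) pixels.
mask = [[0.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 1.0, 0.0],
        [0.0, 1.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0]]
print(outline_response(mask, 1, 1))  # edge pixel -> 2.0, would be tinted with the outline color
print(outline_response(mask, 2, 2))  # edge pixel -> 2.0 as well
```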
@@ -70,7 +70,8 @@
        "magic_spiralize": "spiralize",
        "prime_tower_enable": "wipe_tower",
        "prime_tower_size": "math.sqrt(float(wipe_tower_volume) / float(layer_height))",
        "ooze_shield_enabled": "ooze_shield"
        "ooze_shield_enabled": "ooze_shield",
        "skin_overlap": "fill_overlap"
    },

    "defaults": {
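Editor's note: the prime_tower_size expression above converts the legacy wipe tower volume into a side length for a square tower footprint. A quick sanity check of that formula in Python, with illustrative values (the assumption here is that the legacy value is a per-layer volume, which is what the square-root translation implies):

```python
import math

# size = sqrt(volume / layer_height): divide the per-layer volume (mm^3) by the
# layer height (mm) to get the footprint area, then take the square root for the side length.
wipe_tower_volume = 15.0   # mm^3 per layer (illustrative value)
layer_height = 0.15        # mm

prime_tower_size = math.sqrt(float(wipe_tower_volume) / float(layer_height))
print(round(prime_tower_size, 2))  # 10.0 -> a 10 x 10 mm tower footprint
```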
@@ -66,8 +66,17 @@ class LegacyProfileReader(ProfileReader):
    def read(self, file_name):
        if file_name.split(".")[-1] != "ini":
            return None
        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if not global_container_stack:
            return None

        multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1
        if multi_extrusion:
            Logger.log("e", "Unable to import legacy profile %s. Multi extrusion is not supported", file_name)
            raise Exception("Unable to import legacy profile. Multi extrusion is not supported")

        Logger.log("i", "Importing legacy profile from file " + file_name + ".")
        profile = InstanceContainer("Imported Legacy Profile") #Create an empty profile.
        profile = InstanceContainer("Imported Legacy Profile")  # Create an empty profile.

        parser = configparser.ConfigParser(interpolation = None)
        try:
@@ -111,7 +120,7 @@ class LegacyProfileReader(ProfileReader):
        if "translation" not in dict_of_doom:
            Logger.log("e", "Dictionary of Doom has no translation. Is it the correct JSON file?")
            return None
        current_printer_definition = Application.getInstance().getGlobalContainerStack().getBottom()
        current_printer_definition = global_container_stack.getBottom()
        profile.setDefinition(current_printer_definition)
        for new_setting in dict_of_doom["translation"]:  # Evaluate all new settings that would get a value from the translations.
            old_setting_expression = dict_of_doom["translation"][new_setting]
@@ -130,5 +139,6 @@ class LegacyProfileReader(ProfileReader):
        if len(profile.getAllKeys()) == 0:
            Logger.log("i", "A legacy profile was imported but everything evaluates to the defaults, creating an empty profile.")
        profile.setDirty(True)
        profile.addMetaDataEntry("type", "quality")
        profile.addMetaDataEntry("type", "quality_changes")
        profile.addMetaDataEntry("quality_type", "normal")
        return profile
@@ -77,7 +77,7 @@ Cura.MachineAction
            {
                id: buildAreaWidthField
                text: machineWidthProvider.properties.value
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: { machineWidthProvider.setPropertyValue("value", text); manager.forceUpdate() }
            }
            Label
@@ -93,7 +93,7 @@ Cura.MachineAction
            {
                id: buildAreaDepthField
                text: machineDepthProvider.properties.value
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: { machineDepthProvider.setPropertyValue("value", text); manager.forceUpdate() }
            }
            Label
@@ -109,7 +109,7 @@ Cura.MachineAction
            {
                id: buildAreaHeightField
                text: machineHeightProvider.properties.value
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: { machineHeightProvider.setPropertyValue("value", text); manager.forceUpdate() }
            }
            Label
@@ -147,8 +147,16 @@ Cura.MachineAction

            ComboBox
            {
                model: ["RepRap (Marlin/Sprinter)", "UltiGCode"]
                currentIndex: machineGCodeFlavorProvider.properties.value != model[1] ? 0 : 1
                model: ["RepRap (Marlin/Sprinter)", "UltiGCode", "Repetier"]
                currentIndex:
                {
                    var index = model.indexOf(machineGCodeFlavorProvider.properties.value);
                    if(index == -1)
                    {
                        index = 0;
                    }
                    return index
                }
                onActivated:
                {
                    machineGCodeFlavorProvider.setPropertyValue("value", model[index]);
@@ -182,7 +190,7 @@ Cura.MachineAction
            {
                id: printheadXMinField
                text: getHeadPolygonCoord("x", "min")
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: setHeadPolygon()
            }
            Label
@@ -198,7 +206,7 @@ Cura.MachineAction
            {
                id: printheadYMinField
                text: getHeadPolygonCoord("y", "min")
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: setHeadPolygon()
            }
            Label
@@ -214,7 +222,7 @@ Cura.MachineAction
            {
                id: printheadXMaxField
                text: getHeadPolygonCoord("x", "max")
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: setHeadPolygon()
            }
            Label
@@ -230,7 +238,7 @@ Cura.MachineAction
            {
                id: printheadYMaxField
                text: getHeadPolygonCoord("y", "max")
                validator: RegExpValidator { regExp: /[0-9]{0,6}/ }
                validator: RegExpValidator { regExp: /[0-9\.]{0,6}/ }
                onEditingFinished: setHeadPolygon()
            }
            Label
@@ -55,21 +55,30 @@ class PerObjectSettingVisibilityHandler(UM.Settings.Models.SettingVisibilityHandler):

        # Add all instances that are not added, but are in visibility list
        for item in visible:
            if not settings.getInstance(item):
            if not settings.getInstance(item):  # Setting was not added already.
                definition = self._stack.getSettingDefinition(item)
                if definition:
                    new_instance = SettingInstance(definition, settings)
                    stack_nr = -1
                    if definition.global_inherits_stack and self._stack.getProperty("machine_extruder_count", "value") > 1:
                        #Obtain the value from the correct container stack. Only once, upon adding the setting.
                        stack_nr = str(int(round(float(self._stack.getProperty(item, "global_inherits_stack")))))  #Stack to get the setting from. Round it and remove the fractional part.
                    if stack_nr not in ExtruderManager.getInstance().extruderIds and self._stack.getProperty("extruder_nr", "value"):  #Property not defined, but we have an extruder number.
                        stack_nr = str(int(round(float(self._stack.getProperty("extruder_nr", "value")))))
                    if stack_nr in ExtruderManager.getInstance().extruderIds:  #We have either a global_inherits_stack or an extruder_nr.
                    stack = None
                    # Check from what stack we should copy the raw property of the setting from.
                    if definition.limit_to_extruder != "-1" and self._stack.getProperty("machine_extruder_count", "value") > 1:
                        # A limit to extruder function was set and it's a multi extrusion machine. Check what stack we do need to use.
                        stack_nr = str(int(round(float(self._stack.getProperty(item, "limit_to_extruder")))))

                    # Check if the found stack_number is in the extruder list of extruders.
                    if stack_nr not in ExtruderManager.getInstance().extruderIds and self._stack.getProperty("extruder_nr", "value") is not None:
                        stack_nr = -1

                    # Use the found stack number to get the right stack to copy the value from.
                    if stack_nr in ExtruderManager.getInstance().extruderIds:
                        stack = UM.Settings.ContainerRegistry.getInstance().findContainerStacks(id = ExtruderManager.getInstance().extruderIds[stack_nr])[0]

                    # Use the raw property to set the value (so the inheritance doesn't break)
                    if stack is not None:
                        new_instance.setProperty("value", stack.getRawProperty(item, "value"))
                    else:
                        stack = UM.Application.getInstance().getGlobalContainerStack()
                        new_instance.setProperty("value", stack.getProperty(item, "value"))
                        new_instance.setProperty("value", None)
                    new_instance.resetState()  # Ensure that the state is not seen as a user state.
                    settings.addInstance(new_instance)
                    visibility_changed = True
@@ -31,7 +31,7 @@ Item {
        spacing: UM.Theme.getSize("default_margin").width
        Label
        {
            text: catalog.i18nc("@label", "Print model with")
            text: catalog.i18nc("@label Followed by extruder selection drop-down.", "Print model with")
            anchors.verticalCenter: extruderSelector.verticalCenter

            color: UM.Theme.getColor("setting_control_text")
@@ -44,13 +44,11 @@ Item {

            model: Cura.ExtrudersModel
            {
                id: extruders_model
                onRowsInserted: extruderSelector.visible = extruders_model.rowCount() > 1
                onModelReset: extruderSelector.visible = extruders_model.rowCount() > 1
                onModelChanged: extruderSelector.color = extruders_model.getItem(extruderSelector.currentIndex).color
                id: extrudersModel
                onModelChanged: extruderSelector.color = extrudersModel.getItem(extruderSelector.currentIndex).color
            }
            property string color: extruders_model.getItem(extruderSelector.currentIndex).color
            visible: extruders_model.rowCount() > 1
            property string color: extrudersModel.getItem(extruderSelector.currentIndex).color
            visible: machineExtruderCount.properties.value > 1
            textRole: "name"
            width: UM.Theme.getSize("setting_control").width
            height: UM.Theme.getSize("section").height
@@ -130,19 +128,19 @@ Item {

            onActivated:
            {
                UM.ActiveTool.setProperty("SelectedActiveExtruder", extruders_model.getItem(index).id);
                extruderSelector.color = extruders_model.getItem(index).color;
                UM.ActiveTool.setProperty("SelectedActiveExtruder", extrudersModel.getItem(index).id);
                extruderSelector.color = extrudersModel.getItem(index).color;
            }
            onModelChanged: updateCurrentIndex();

            function updateCurrentIndex()
            {
                for(var i = 0; i < extruders_model.rowCount(); ++i)
                for(var i = 0; i < extrudersModel.rowCount(); ++i)
                {
                    if(extruders_model.getItem(i).id == UM.ActiveTool.properties.getValue("SelectedActiveExtruder"))
                    if(extrudersModel.getItem(i).id == UM.ActiveTool.properties.getValue("SelectedActiveExtruder"))
                    {
                        extruderSelector.currentIndex = i;
                        extruderSelector.color = extruders_model.getItem(i).color;
                        extruderSelector.color = extrudersModel.getItem(i).color;
                        return;
                    }
                }
@@ -153,10 +151,11 @@ Item {

        Column
        {
            spacing: UM.Theme.getSize("default_lining").height
            // This is to ensure that the panel is first increasing in size up to 200 and then shows a scrollbar.
            // It kinda looks ugly otherwise (big panel, no content on it)
            height: contents.count * UM.Theme.getSize("section").height < 200 ? contents.count * UM.Theme.getSize("section").height : 200
            property int maximumHeight: 200 * Screen.devicePixelRatio
            height: Math.min(contents.count * (UM.Theme.getSize("section").height + UM.Theme.getSize("default_lining").height), maximumHeight)

            ScrollView
            {
                height: parent.height
@@ -165,6 +164,7 @@ Item {
                ListView
                {
                    id: contents
                    spacing: UM.Theme.getSize("default_lining").height

                    model: UM.SettingDefinitionsModel
                    {
@@ -262,6 +262,14 @@ Item {
                        storeIndex: 0
                        removeUnusedValue: false
                    }

                    // If the extruder by which the object needs to be printed is changed, ensure that the
                    // display is also notified of the fact.
                    Connections
                    {
                        target: extruderSelector
                        onActivated: provider.forcePropertiesChanged()
                    }
                }
            }
        }
@@ -393,7 +401,7 @@ Item {
        }
        visibilityHandler: UM.SettingPreferenceVisibilityHandler {}
        expanded: [ "*" ]
        exclude: [ "machine_settings" ]
        exclude: [ "machine_settings", "command_line_settings" ]
    }
    delegate: Loader
    {
@@ -430,6 +438,16 @@ Item {
        ]
    }

    UM.SettingPropertyProvider
    {
        id: machineExtruderCount

        containerStackId: Cura.MachineManager.activeMachineId
        key: "machine_extruder_count"
        watchedProperties: [ "value" ]
        storeIndex: 0
    }

    SystemPalette { id: palette; }

    Component
@@ -7,6 +7,8 @@ from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator
from UM.Application import Application
from UM.Preferences import Preferences
from cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator
from cura.Settings.ExtruderManager import ExtruderManager
from UM.Event import Event


## This tool allows the user to add & change settings per node in the scene.
@@ -20,6 +22,7 @@ class PerObjectSettingsTool(Tool):

        self._advanced_mode = False
        self._multi_extrusion = False
        self._single_model_selected = False

        Selection.selectionChanged.connect(self.propertyChanged)

@@ -28,8 +31,13 @@ class PerObjectSettingsTool(Tool):

        Application.getInstance().globalContainerStackChanged.connect(self._onGlobalContainerChanged)
        self._onGlobalContainerChanged()
        Selection.selectionChanged.connect(self._updateEnabled)

    def event(self, event):
        super().event(event)
        if event.type == Event.MousePressEvent and self._controller.getToolsEnabled():
            self.operationStopped.emit(self)
        return False

    def getSelectedObjectId(self):
@@ -71,12 +79,37 @@ class PerObjectSettingsTool(Tool):
        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if global_container_stack:
            self._multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1

            # Ensure that all extruder data is reset
            if not self._multi_extrusion:
                # Ensure that all extruder data is reset
                root_node = Application.getInstance().getController().getScene().getRoot()
                for node in DepthFirstIterator(root_node):
                    node.callDecoration("setActiveExtruder", global_container_stack.getId())
                default_stack_id = global_container_stack.getId()
            else:
                default_stack = ExtruderManager.getInstance().getExtruderStack(0)
                if default_stack:
                    default_stack_id = default_stack.getId()
                else:
                    default_stack_id = global_container_stack.getId()

            root_node = Application.getInstance().getController().getScene().getRoot()
            for node in DepthFirstIterator(root_node):
                new_stack_id = default_stack_id
                # Get position of old extruder stack for this node
                old_extruder_pos = node.callDecoration("getActiveExtruderPosition")
                if old_extruder_pos is not None:
                    # Fetch current (new) extruder stack at position
                    new_stack = ExtruderManager.getInstance().getExtruderStack(old_extruder_pos)
                    if new_stack:
                        new_stack_id = new_stack.getId()
                node.callDecoration("setActiveExtruder", new_stack_id)

            self._updateEnabled()

    def _updateEnabled(self):
        Application.getInstance().getController().toolEnabledChanged.emit(self._plugin_id, self._advanced_mode or self._multi_extrusion)
        selected_objects = Selection.getAllSelectedObjects()
        if len(selected_objects) > 1:
            self._single_model_selected = False
        elif len(selected_objects) == 1 and selected_objects[0].callDecoration("isGroup"):
            self._single_model_selected = False  # Group is selected, so tool needs to be disabled
        else:
            self._single_model_selected = True
        Application.getInstance().getController().toolEnabledChanged.emit(self._plugin_id, (self._advanced_mode or self._multi_extrusion) and self._single_model_selected)
@@ -1,3 +1,6 @@
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

import os.path

from UM.Application import Application
@@ -17,14 +20,24 @@ class RemovableDriveOutputDevice(OutputDevice):
        super().__init__(device_id)

        self.setName(device_name)
        self.setShortDescription(catalog.i18nc("@action:button", "Save to Removable Drive"))
        self.setShortDescription(catalog.i18nc("@action:button Preceded by 'Ready to'.", "Save to Removable Drive"))
        self.setDescription(catalog.i18nc("@item:inlistbox", "Save to Removable Drive {0}").format(device_name))
        self.setIconName("save_sd")
        self.setPriority(1)

        self._writing = False
        self._stream = None

    def requestWrite(self, node, file_name = None, filter_by_machine = False):
    ##  Request the specified nodes to be written to the removable drive.
    #
    #   \param nodes A collection of scene nodes that should be written to the
    #   removable drive.
    #   \param file_name \type{string} A suggestion for the file name to write
    #   to. If none is provided, a file name will be made from the names of the
    #   meshes.
    #   \param limit_mimetypes Should we limit the available MIME types to the
    #   MIME types available to the currently active machine?
    def requestWrite(self, nodes, file_name = None, filter_by_machine = False):
        filter_by_machine = True  # This plugin is intended to be used by machine (regardless of what it was told to do)
        if self._writing:
            raise OutputDeviceError.DeviceBusyError()
@@ -49,15 +62,7 @@ class RemovableDriveOutputDevice(OutputDevice):
        extension = file_formats[0]["extension"]

        if file_name is None:
            for n in BreadthFirstIterator(node):
                if n.getMeshData():
                    file_name = n.getName()
                    if file_name:
                        break

            if not file_name:
                Logger.log("e", "Could not determine a proper file name when trying to write to %s, aborting", self.getName())
                raise OutputDeviceError.WriteRequestFailedError()
            file_name = self._automaticFileName(nodes)

        if extension:  # Not empty string.
            extension = "." + extension
@@ -65,8 +70,9 @@ class RemovableDriveOutputDevice(OutputDevice):

        try:
            Logger.log("d", "Writing to %s", file_name)
            stream = open(file_name, "wt")
            job = WriteMeshJob(writer, stream, node, MeshWriter.OutputMode.TextMode)
            # Using buffering greatly reduces the write time for many lines of gcode
            self._stream = open(file_name, "wt", buffering = 1, encoding = "utf-8")
            job = WriteMeshJob(writer, self._stream, nodes, MeshWriter.OutputMode.TextMode)
            job.setFileName(file_name)
            job.progress.connect(self._onProgress)
            job.finished.connect(self._onFinished)
@@ -86,12 +92,33 @@ class RemovableDriveOutputDevice(OutputDevice):
            Logger.log("e", "Operating system would not let us write to %s: %s", file_name, str(e))
            raise OutputDeviceError.WriteRequestFailedError(catalog.i18nc("@info:status", "Could not save to <filename>{0}</filename>: <message>{1}</message>").format(file_name, str(e))) from e

    ##  Generate a file name automatically for the specified nodes to be saved
    #   in.
    #
    #   The name generated will be the name of one of the nodes. Which node that
    #   is can not be guaranteed.
    #
    #   \param nodes A collection of nodes for which to generate a file name.
    def _automaticFileName(self, nodes):
        for root in nodes:
            for child in BreadthFirstIterator(root):
                if child.getMeshData():
                    name = child.getName()
                    if name:
                        return name
        raise OutputDeviceError.WriteRequestFailedError("Could not find a file name when trying to write to {device}.".format(device = self.getName()))

    def _onProgress(self, job, progress):
        if hasattr(job, "_message"):
            job._message.setProgress(progress)
        self.writeProgress.emit(self, progress)

    def _onFinished(self, job):
        if self._stream:
            # Explicitly closing the stream flushes the write-buffer
            self._stream.close()
            self._stream = None

        if hasattr(job, "_message"):
            job._message.hide()
            job._message = None
@@ -113,4 +140,9 @@ class RemovableDriveOutputDevice(OutputDevice):
    def _onActionTriggered(self, message, action):
        if action == "eject":
            if Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("RemovableDriveOutputDevice").ejectDevice(self):
                message.hide()
            message.hide()

            eject_message = Message(catalog.i18nc("@info:status", "Ejected {0}. You can now safely remove the drive.").format(self.getName()))
            else:
                eject_message = Message(catalog.i18nc("@info:status", "Failed to eject {0}. Another program may be using the drive.").format(self.getName()))
            eject_message.show()
@@ -46,11 +46,6 @@ class RemovableDrivePlugin(OutputDevicePlugin):

        if result:
            Logger.log("i", "Succesfully ejected the device")
            message = Message(catalog.i18nc("@info:status", "Ejected {0}. You can now safely remove the drive.").format(device.getName()))
            message.show()
        else:
            message = Message(catalog.i18nc("@info:status", "Failed to eject {0}. Maybe it is still in use?").format(device.getName()))
            message.show()
        return result

    def performEjectDevice(self, device):
@@ -1,6 +1,8 @@
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from cura.CuraApplication import CuraApplication

from UM.Extension import Extension
from UM.Application import Application
from UM.Preferences import Preferences
@@ -18,6 +20,8 @@ import math
import urllib.request
import urllib.parse
import ssl
import hashlib
import json

catalog = i18nCatalog("cura")

@@ -43,9 +47,11 @@ class SliceInfoJob(Job):
        if Platform.isOSX():
            kwoptions["context"] = ssl._create_unverified_context()

        Logger.log("d", "Sending anonymous slice info to [%s]...", self.url)

        try:
            f = urllib.request.urlopen(self.url, **kwoptions)
            Logger.log("i", "Sent anonymous slice info to %s", self.url)
            Logger.log("i", "Sent anonymous slice info.")
            f.close()
        except urllib.error.HTTPError as http_exception:
            Logger.log("e", "An HTTP error occurred while trying to send slice information: %s" % http_exception)
@@ -65,7 +71,7 @@ class SliceInfo(Extension):
        Preferences.getInstance().addPreference("info/asked_send_slice_info", False)

        if not Preferences.getInstance().getValue("info/asked_send_slice_info"):
            self.send_slice_info_message = Message(catalog.i18nc("@info", "Cura automatically sends slice info. You can disable this in preferences"), lifetime = 0, dismissable = False)
            self.send_slice_info_message = Message(catalog.i18nc("@info", "Cura collects anonymised slicing statistics. You can disable this in preferences"), lifetime = 0, dismissable = False)
            self.send_slice_info_message.addAction("Dismiss", catalog.i18nc("@action:button", "Dismiss"), None, "")
            self.send_slice_info_message.actionTriggered.connect(self.messageActionTriggered)
            self.send_slice_info_message.show()
@@ -80,48 +86,27 @@ class SliceInfo(Extension):
            Logger.log("d", "'info/send_slice_info' is turned off.")
            return  # Do nothing, user does not want to send data

        # Listing all files placed on the buildplate
        modelhashes = []
        for node in DepthFirstIterator(CuraApplication.getInstance().getController().getScene().getRoot()):
            if type(node) is not SceneNode or not node.getMeshData():
                continue
            modelhashes.append(node.getMeshData().getHash())

        # Creating md5sums and formatting them as discussed on JIRA
        modelhash_formatted = ",".join(modelhashes)

        global_container_stack = Application.getInstance().getGlobalContainerStack()

        # Get total material used (in mm^3)
        print_information = Application.getInstance().getPrintInformation()
        material_radius = 0.5 * global_container_stack.getProperty("material_diameter", "value")

        # TODO: Send material per extruder instead of mashing it on a pile
        material_used = math.pi * material_radius * material_radius * sum(print_information.materialLengths)  #Volume of all materials used
        # Send material per extruder
        material_used = [str(math.pi * material_radius * material_radius * material_length) for material_length in print_information.materialLengths]
        material_used = ",".join(material_used)

        # Get model information (bounding boxes, hashes and transformation matrix)
        models_info = []
        for node in DepthFirstIterator(Application.getInstance().getController().getScene().getRoot()):
            if type(node) is SceneNode and node.getMeshData() and node.getMeshData().getVertices() is not None:
                if not getattr(node, "_outside_buildarea", False):
                    model_info = {}
                    model_info["hash"] = node.getMeshData().getHash()
                    model_info["bounding_box"] = {}
                    model_info["bounding_box"]["minimum"] = {}
                    model_info["bounding_box"]["minimum"]["x"] = node.getBoundingBox().minimum.x
                    model_info["bounding_box"]["minimum"]["y"] = node.getBoundingBox().minimum.y
                    model_info["bounding_box"]["minimum"]["z"] = node.getBoundingBox().minimum.z

                    model_info["bounding_box"]["maximum"] = {}
                    model_info["bounding_box"]["maximum"]["x"] = node.getBoundingBox().maximum.x
                    model_info["bounding_box"]["maximum"]["y"] = node.getBoundingBox().maximum.y
                    model_info["bounding_box"]["maximum"]["z"] = node.getBoundingBox().maximum.z
                    model_info["transformation"] = str(node.getWorldTransformation().getData())

                    models_info.append(model_info)

        # Bundle the collected data
        submitted_data = {
            "processor": platform.processor(),
            "machine": platform.machine(),
            "platform": platform.platform(),
            "settings": global_container_stack.serialize(),  # global_container with references on used containers
            "version": Application.getInstance().getVersion(),
            "modelhash": "None",
            "printtime": print_information.currentPrintTime.getDisplayString(DurationFormat.Format.ISO8601),
            "filament": material_used,
            "language": Preferences.getInstance().getValue("general/language"),
        }
        containers = { "": global_container_stack.serialize() }
        for container in global_container_stack.getContainers():
            container_id = container.getId()
            try:
@@ -129,12 +114,24 @@ class SliceInfo(Extension):
            except NotImplementedError:
                Logger.log("w", "Container %s could not be serialized!", container_id)
                continue

            if container_serialized:
                submitted_data["settings_%s" %(container_id)] = container_serialized  # This can be anything, eg. INI, JSON, etc.
                containers[container_id] = container_serialized
            else:
                Logger.log("i", "No data found in %s to be serialized!", container_id)

        # Bundle the collected data
        submitted_data = {
            "processor": platform.processor(),
            "machine": platform.machine(),
            "platform": platform.platform(),
            "settings": json.dumps(containers),  # bundle of containers with their serialized contents
            "version": Application.getInstance().getVersion(),
            "modelhash": modelhash_formatted,
            "printtime": print_information.currentPrintTime.getDisplayString(DurationFormat.Format.ISO8601),
            "filament": material_used,
            "language": Preferences.getInstance().getValue("general/language"),
        }

        # Convert data to bytes
        submitted_data = urllib.parse.urlencode(submitted_data)
        binary_data = submitted_data.encode("utf-8")
@@ -145,4 +142,4 @@ class SliceInfo(Extension):
        except Exception as e:
            # We really can't afford to have a mistake here, as this would break the sending of g-code to a device
            # (Either saving or directly to a printer). The functionality of the slice data is not *that* important.
            Logger.log("e", "Exception raised while sending slice info: %s" %(repr(e)))  # But we should be notified about these problems of course.
|
|||
|
||||
from UM.View.GL.OpenGL import OpenGL
|
||||
|
||||
import cura.Settings
|
||||
import cura.Settings.ExtrudersModel
|
||||
from cura.Settings.ExtruderManager import ExtruderManager
|
||||
from cura.Settings.ExtrudersModel import ExtrudersModel
|
||||
|
||||
import math
|
||||
|
||||
## Standard view for mesh models.
|
||||
|
||||
class SolidView(View):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
@ -27,7 +28,7 @@ class SolidView(View):
|
|||
self._enabled_shader = None
|
||||
self._disabled_shader = None
|
||||
|
||||
self._extruders_model = cura.Settings.ExtrudersModel.ExtrudersModel()
|
||||
self._extruders_model = ExtrudersModel()
|
||||
|
||||
def beginRendering(self):
|
||||
scene = self.getController().getScene()
|
||||
|
@ -46,22 +47,34 @@ class SolidView(View):
|
|||
|
||||
global_container_stack = Application.getInstance().getGlobalContainerStack()
|
||||
if global_container_stack:
|
||||
multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1
|
||||
|
||||
if multi_extrusion:
|
||||
support_extruder_nr = global_container_stack.getProperty("support_extruder_nr", "value")
|
||||
support_angle_stack = ExtruderManager.getInstance().getExtruderStack(support_extruder_nr)
|
||||
if not support_angle_stack:
|
||||
support_angle_stack = global_container_stack
|
||||
else:
|
||||
support_angle_stack = global_container_stack
|
||||
|
||||
if Preferences.getInstance().getValue("view/show_overhang"):
|
||||
angle = global_container_stack.getProperty("support_angle", "value")
|
||||
if angle is not None and global_container_stack.getProperty("support_angle", "validationState") == ValidatorState.Valid:
|
||||
angle = support_angle_stack.getProperty("support_angle", "value")
|
||||
# Make sure the overhang angle is valid before passing it to the shader
|
||||
# Note: if the overhang angle is set to its default value, it does not need to get validated (validationState = None)
|
||||
if angle is not None and global_container_stack.getProperty("support_angle", "validationState") in [None, ValidatorState.Valid]:
|
||||
self._enabled_shader.setUniformValue("u_overhangAngle", math.cos(math.radians(90 - angle)))
|
||||
else:
|
||||
self._enabled_shader.setUniformValue("u_overhangAngle", math.cos(math.radians(0))) #Overhang angle of 0 causes no area at all to be marked as overhang.
|
||||
else:
|
||||
self._enabled_shader.setUniformValue("u_overhangAngle", math.cos(math.radians(0)))
|
||||
|
||||
multi_extrusion = global_container_stack.getProperty("machine_extruder_count", "value") > 1
|
||||
|
||||
for node in DepthFirstIterator(scene.getRoot()):
|
||||
if not node.render(renderer):
|
||||
if node.getMeshData() and node.isVisible():
|
||||
|
||||
uniforms = {}
|
||||
shade_factor = 1.0
|
||||
|
||||
if not multi_extrusion:
|
||||
if global_container_stack:
|
||||
material = global_container_stack.findContainer({ "type": "material" })
|
||||
|
@ -76,13 +89,17 @@ class SolidView(View):
|
|||
extruder_index = max(0, self._extruders_model.find("id", extruder_id))
|
||||
|
||||
material_color = self._extruders_model.getItem(extruder_index)["color"]
|
||||
|
||||
if extruder_index != ExtruderManager.getInstance().activeExtruderIndex:
|
||||
# Shade objects that are printed with the non-active extruder 25% darker
|
||||
shade_factor = 0.6
|
||||
try:
|
||||
# Colors are passed as rgb hex strings (eg "#ffffff"), and the shader needs
|
||||
# an rgba list of floats (eg [1.0, 1.0, 1.0, 1.0])
|
||||
uniforms["diffuse_color"] = [
|
||||
int(material_color[1:3], 16) / 255,
|
||||
int(material_color[3:5], 16) / 255,
|
||||
int(material_color[5:7], 16) / 255,
|
||||
shade_factor * int(material_color[1:3], 16) / 255,
|
||||
shade_factor * int(material_color[3:5], 16) / 255,
|
||||
shade_factor * int(material_color[5:7], 16) / 255,
|
||||
1.0
|
||||
]
|
||||
except ValueError:
|
||||
|
|
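Editor's note: the hex-to-float conversion in the hunk above is easy to verify in isolation. This is a minimal standalone sketch of the same conversion; the helper name is illustrative and not part of the plugin.

```python
def material_color_to_rgba(material_color, shade_factor = 1.0):
    """Convert an "#rrggbb" hex string to the [r, g, b, a] float list the shader expects."""
    return [
        shade_factor * int(material_color[1:3], 16) / 255,  # red channel
        shade_factor * int(material_color[3:5], 16) / 255,  # green channel
        shade_factor * int(material_color[5:7], 16) / 255,  # blue channel
        1.0
    ]

print(material_color_to_rgba("#ffc0cb"))        # active extruder: approx. [1.0, 0.753, 0.796, 1.0]
print(material_color_to_rgba("#ffc0cb", 0.6))   # non-active extruder: dimmed by the shade factor
```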
@@ -1,4 +1,4 @@
# Copyright (c) 2015 Ultimaker B.V.
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from .avr_isp import stk500v2, ispBase, intelHex
@@ -19,13 +19,12 @@ from PyQt5.QtCore import QUrl, pyqtSlot, pyqtSignal, pyqtProperty
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")


class USBPrinterOutputDevice(PrinterOutputDevice):

    def __init__(self, serial_port):
        super().__init__(serial_port)
        self.setName(catalog.i18nc("@item:inmenu", "USB printing"))
        self.setShortDescription(catalog.i18nc("@action:button", "Print via USB"))
        self.setShortDescription(catalog.i18nc("@action:button Preceded by 'Ready to'.", "Print via USB"))
        self.setDescription(catalog.i18nc("@info:tooltip", "Print via USB"))
        self.setIconName("print")
        self.setConnectionText(catalog.i18nc("@info:status", "Connected via USB"))
@@ -140,7 +139,7 @@ class USBPrinterOutputDevice(PrinterOutputDevice):
    #   \param gcode_list List with gcode (strings).
    def printGCode(self, gcode_list):
        if self._progress or self._connection_state != ConnectionState.connected:
            self._error_message = Message(catalog.i18nc("@info:status", "Printer is busy or not connected. Unable to start a new job."))
            self._error_message = Message(catalog.i18nc("@info:status", "Unable to start a new job because the printer is busy or not connected."))
            self._error_message.show()
            Logger.log("d", "Printer is busy or not connected, aborting print")
            self.writeError.emit(self)
@@ -426,7 +425,14 @@ class USBPrinterOutputDevice(PrinterOutputDevice):
        self._error_state = error
        self.onError.emit()

    def requestWrite(self, node, file_name = None, filter_by_machine = False):
    ##  Request the current scene to be sent to a USB-connected printer.
    #
    #   \param nodes A collection of scene nodes to send. This is ignored.
    #   \param file_name \type{string} A suggestion for a file name to write.
    #   This is ignored.
    #   \param filter_by_machine Whether to filter MIME types by machine. This
    #   is ignored.
    def requestWrite(self, nodes, file_name = None, filter_by_machine = False):
        Application.getInstance().showPrintMonitor.emit(True)
        self.startPrint()

@@ -103,10 +103,12 @@ class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin, Extension):

        self._firmware_view.show()

    @pyqtSlot()
    def updateAllFirmware(self):
    @pyqtSlot(str)
    def updateAllFirmware(self, file_name):
        if file_name.startswith("file://"):
            file_name = QUrl(file_name).toLocalFile()  # File dialogs prepend the path with file://, which we don't need / want
        if not self._usb_output_devices:
            Message(i18n_catalog.i18nc("@info","Cannot update firmware, there were no connected printers found.")).show()
            Message(i18n_catalog.i18nc("@info", "Unable to update firmware because there are no printers connected.")).show()
            return

        for printer_connection in self._usb_output_devices:
@@ -114,26 +116,26 @@ class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin, Extension):
        self.spawnFirmwareInterface("")
        for printer_connection in self._usb_output_devices:
            try:
                self._usb_output_devices[printer_connection].updateFirmware(Resources.getPath(CuraApplication.ResourceTypes.Firmware, self._getDefaultFirmwareName()))
                self._usb_output_devices[printer_connection].updateFirmware(file_name)
            except FileNotFoundError:
                # Should only happen in dev environments where the resources/firmware folder is absent.
                self._usb_output_devices[printer_connection].setProgress(100, 100)
                Logger.log("w", "No firmware found for printer %s called '%s'" %(printer_connection, self._getDefaultFirmwareName()))
                Logger.log("w", "No firmware found for printer %s called '%s'", printer_connection, file_name)
                Message(i18n_catalog.i18nc("@info",
                    "Could not find firmware required for the printer at %s.") % printer_connection).show()
                self._firmware_view.close()

                continue

    @pyqtSlot(str, result = bool)
    def updateFirmwareBySerial(self, serial_port):
    @pyqtSlot(str, str, result = bool)
    def updateFirmwareBySerial(self, serial_port, file_name):
        if serial_port in self._usb_output_devices:
            self.spawnFirmwareInterface(self._usb_output_devices[serial_port].getSerialPort())
            try:
                self._usb_output_devices[serial_port].updateFirmware(Resources.getPath(CuraApplication.ResourceTypes.Firmware, self._getDefaultFirmwareName()))
                self._usb_output_devices[serial_port].updateFirmware(file_name)
            except FileNotFoundError:
                self._firmware_view.close()
                Logger.log("e", "Could not find firmware required for this machine called '%s'" %(self._getDefaultFirmwareName()))
                Logger.log("e", "Could not find firmware required for this machine called '%s'", file_name)
                return False
            return True
        return False
@@ -147,7 +149,8 @@ class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin, Extension):

        return USBPrinterOutputDeviceManager._instance

    def _getDefaultFirmwareName(self):
    @pyqtSlot(result = str)
    def getDefaultFirmwareName(self):
        # Check if there is a valid global container stack
        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if not global_container_stack:
@@ -193,13 +196,13 @@ class USBPrinterOutputDeviceManager(QObject, OutputDevicePlugin, Extension):
            Logger.log("d", "Choosing basic firmware for machine %s.", machine_id)
            hex_file = machine_without_extras[machine_id]  # Return "basic" firmware
        else:
            Logger.log("e", "There is no firmware for machine %s.", machine_id)
            Logger.log("w", "There is no firmware for machine %s.", machine_id)

        if hex_file:
            return hex_file.format(baudrate=baudrate)
            return Resources.getPath(CuraApplication.ResourceTypes.Firmware, hex_file.format(baudrate=baudrate))
        else:
            Logger.log("e", "Could not find any firmware for machine %s.", machine_id)
            raise FileNotFoundError()
            Logger.log("w", "Could not find any firmware for machine %s.", machine_id)
            return ""

    ##  Helper to identify serial ports (and scan for them)
    def _addRemovePorts(self, serial_ports):
|
|||
This is a python 3 conversion of the code created by David Braam for the Cura project.
|
||||
"""
|
||||
import io
|
||||
from UM.Logger import Logger
|
||||
|
||||
def readHex(filename):
|
||||
"""
|
||||
|
@ -41,6 +42,6 @@ def readHex(filename):
|
|||
elif rec_type == 2: #Extended Segment Address Record
|
||||
extra_addr = int(line[9:13], 16) * 16
|
||||
else:
|
||||
print(rec_type, rec_len, addr, check_sum, line)
|
||||
Logger.log("d", "%s, %s, %s, %s, %s", rec_type, rec_len, addr, check_sum, line)
|
||||
f.close()
|
||||
return data
|
||||
|
|
|
@ -8,6 +8,7 @@ The ISP AVR programmer can load firmware into AVR chips. Which are commonly used
|
|||
"""
|
||||
|
||||
from . import chipDB
|
||||
from UM.Logger import Logger
|
||||
|
||||
class IspBase():
|
||||
"""
|
||||
|
@ -22,11 +23,11 @@ class IspBase():
|
|||
raise IspError("Chip with signature: " + str(self.getSignature()) + "not found")
|
||||
self.chipErase()
|
||||
|
||||
print("Flashing %i bytes" % len(flash_data))
|
||||
Logger.log("d", "Flashing %i bytes", len(flash_data))
|
||||
self.writeFlash(flash_data)
|
||||
print("Verifying %i bytes" % len(flash_data))
|
||||
Logger.log("d", "Verifying %i bytes", len(flash_data))
|
||||
self.verifyFlash(flash_data)
|
||||
print("Completed")
|
||||
Logger.log("d", "Completed")
|
||||
|
||||
def getSignature(self):
|
||||
"""
|
||||
|
|
|
@ -3,7 +3,6 @@ STK500v2 protocol implementation for programming AVR chips.
|
|||
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
|
||||
This is a python 3 conversion of the code created by David Braam for the Cura project.
|
||||
"""
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
import time
|
||||
|
@ -11,6 +10,7 @@ import time
|
|||
from serial import Serial
|
||||
from serial import SerialException
|
||||
from serial import SerialTimeoutException
|
||||
from UM.Logger import Logger
|
||||
|
||||
from . import ispBase, intelHex
|
||||
|
||||
|
@ -27,7 +27,7 @@ class Stk500v2(ispBase.IspBase):
|
|||
self.close()
|
||||
try:
|
||||
self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
|
||||
except SerialException as e:
|
||||
except SerialException:
|
||||
raise ispBase.IspError("Failed to open serial port")
|
||||
except:
|
||||
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
|
||||
|
@ -84,14 +84,14 @@ class Stk500v2(ispBase.IspBase):
|
|||
#Set load addr to 0, in case we have more then 64k flash we need to enable the address extension
|
||||
page_size = self.chip["pageSize"] * 2
|
||||
flash_size = page_size * self.chip["pageCount"]
|
||||
print("Writing flash")
|
||||
Logger.log("d", "Writing flash")
|
||||
if flash_size > 0xFFFF:
|
||||
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
|
||||
else:
|
||||
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
|
||||
load_count = (len(flash_data) + page_size - 1) / page_size
|
||||
for i in range(0, int(load_count)):
|
||||
recv = self.sendMessage([0x13, page_size >> 8, page_size & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flash_data[(i * page_size):(i * page_size + page_size)])
|
||||
self.sendMessage([0x13, page_size >> 8, page_size & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flash_data[(i * page_size):(i * page_size + page_size)])
|
||||
if self.progress_callback is not None:
|
||||
if self._has_checksum:
|
||||
self.progress_callback(i + 1, load_count)
|
||||
|
@ -151,7 +151,6 @@ class Stk500v2(ispBase.IspBase):
|
|||
raise ispBase.IspError("Timeout")
|
||||
b = struct.unpack(">B", s)[0]
|
||||
checksum ^= b
|
||||
#print(hex(b))
|
||||
if state == "Start":
|
||||
if b == 0x1B:
|
||||
state = "GetSeq"
|
||||
|
@ -183,11 +182,11 @@ class Stk500v2(ispBase.IspBase):
|
|||
def portList():
|
||||
ret = []
|
||||
import _winreg
|
||||
key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"HARDWARE\\DEVICEMAP\\SERIALCOMM")
|
||||
key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"HARDWARE\\DEVICEMAP\\SERIALCOMM") #@UndefinedVariable
|
||||
i=0
|
||||
while True:
|
||||
try:
|
||||
values = _winreg.EnumValue(key, i)
|
||||
values = _winreg.EnumValue(key, i) #@UndefinedVariable
|
||||
except:
|
||||
return ret
|
||||
if "USBSER" in values[0]:
|
||||
|
@ -206,7 +205,7 @@ def main():
|
|||
""" Entry point to call the stk500v2 programmer from the commandline. """
|
||||
import threading
|
||||
if sys.argv[1] == "AUTO":
|
||||
print(portList())
|
||||
Logger.log("d", "portList(): ", repr(portList()))
|
||||
for port in portList():
|
||||
threading.Thread(target=runProgrammer, args=(port,sys.argv[2])).start()
|
||||
time.sleep(5)
|
||||
|
|
|
@ -64,7 +64,7 @@ Cura.MachineAction
|
|||
{
|
||||
startBedLevelingButton.visible = false;
|
||||
bedlevelingButton.visible = true;
|
||||
manager.startCheck();
|
||||
manager.startBedLeveling();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ class UMOUpgradeSelection(MachineAction):
|
|||
if variant:
|
||||
if variant.getId() == "empty_variant":
|
||||
variant_index = global_container_stack.getContainerIndex(variant)
|
||||
self._createVariant(global_container_stack, variant_index)
|
||||
variant = self._createVariant(global_container_stack, variant_index)
|
||||
variant.setProperty("machine_heated_bed", "value", heated_bed)
|
||||
self.heatedBedChanged.emit()
|
||||
|
||||
|
@ -41,4 +41,5 @@ class UMOUpgradeSelection(MachineAction):
|
|||
new_variant.addMetaDataEntry("type", "variant")
|
||||
new_variant.setDefinition(global_container_stack.getBottom())
|
||||
UM.Settings.ContainerRegistry.getInstance().addContainer(new_variant)
|
||||
global_container_stack.replaceContainer(variant_index, new_variant)
|
||||
global_container_stack.replaceContainer(variant_index, new_variant)
|
||||
return new_variant
|
|
@@ -1,9 +1,17 @@
from cura.MachineAction import MachineAction
from UM.i18n import i18nCatalog
import cura.Settings.CuraContainerRegistry
import UM.Settings.DefinitionContainer
catalog = i18nCatalog("cura")


class UpgradeFirmwareMachineAction(MachineAction):
    def __init__(self):
        super().__init__("UpgradeFirmware", catalog.i18nc("@action", "Upgrade Firmware"))
        self._qml_url = "UpgradeFirmwareMachineAction.qml"
        cura.Settings.CuraContainerRegistry.getInstance().containerAdded.connect(self._onContainerAdded)

    def _onContainerAdded(self, container):
        # Add this action as a supported action to all machine definitions
        if isinstance(container, UM.Settings.DefinitionContainer) and container.getMetaDataEntry("type") == "machine" and container.getMetaDataEntry("supports_usb_connection"):
            UM.Application.getInstance().getMachineActionManager().addSupportedAction(container.getId(), self.getKey())
@@ -5,6 +5,7 @@ import QtQuick 2.2
import QtQuick.Controls 1.1
import QtQuick.Layouts 1.1
import QtQuick.Window 2.1
import QtQuick.Dialogs 1.2 // For filedialog

import UM 1.2 as UM
import Cura 1.0 as Cura
@@ -44,34 +45,45 @@ Cura.MachineAction
            anchors.topMargin: UM.Theme.getSize("default_margin").height
            width: parent.width
            wrapMode: Text.WordWrap
            text: catalog.i18nc("@label", "The firmware shipping with new Ultimakers works, but upgrades have been made to make better prints, and make calibration easier.");
            text: catalog.i18nc("@label", "The firmware shipping with new printers works, but new versions tend to have more features and improvements.");
        }

        Label
        {
            id: upgradeText2
            anchors.top: upgradeText1.bottom
            anchors.topMargin: UM.Theme.getSize("default_margin").height
            width: parent.width
            wrapMode: Text.WordWrap
            text: catalog.i18nc("@label", "Cura requires these new features and thus your firmware will most likely need to be upgraded. You can do so now.");
        }
        Row
        {
            anchors.top: upgradeText2.bottom
            anchors.top: upgradeText1.bottom
            anchors.topMargin: UM.Theme.getSize("default_margin").height
            anchors.horizontalCenter: parent.horizontalCenter
            width: childrenRect.width
            spacing: UM.Theme.getSize("default_margin").width
            property var firmwareName: Cura.USBPrinterManager.getDefaultFirmwareName()
            Button
            {
                id: upgradeButton
                text: catalog.i18nc("@action:button","Upgrade to Marlin Firmware");
                id: autoUpgradeButton
                text: catalog.i18nc("@action:button", "Automatically upgrade Firmware");
                enabled: parent.firmwareName != ""
                onClicked:
                {
                    Cura.USBPrinterManager.updateAllFirmware()
                    Cura.USBPrinterManager.updateAllFirmware(parent.firmwareName)
                }
            }
            Button
            {
                id: manualUpgradeButton
                text: catalog.i18nc("@action:button", "Upload custom Firmware");
                onClicked:
                {
                    customFirmwareDialog.open()
                }
            }
        }

        FileDialog
        {
            id: customFirmwareDialog
            title: catalog.i18nc("@title:window", "Select custom firmware")
            nameFilters: "Firmware image files (*.hex)"
            selectExisting: true
            onAccepted: Cura.USBPrinterManager.updateAllFirmware(fileUrl)
        }
    }
}
|
|||
# Cura is released under the terms of the AGPLv3 or higher.
|
||||
|
||||
import UM.VersionUpgrade #To indicate that a file is of incorrect format.
|
||||
import UM.VersionUpgradeManager #To schedule more files to be upgraded.
|
||||
import UM.Resources #To get the config storage path.
|
||||
|
||||
import configparser #To read config files.
|
||||
import io #To write config files to strings as if they were files.
|
||||
import os.path #To get the path to write new user profiles to.
|
||||
import urllib #To serialise the user container file name properly.
|
||||
|
||||
## Creates a new machine instance instance by parsing a serialised machine
|
||||
# instance in version 1 of the file format.
|
||||
|
@ -79,39 +83,40 @@ class MachineInstance:
|
|||
variant_materials = VersionUpgrade21to22.VersionUpgrade21to22.VersionUpgrade21to22.translateVariantForMaterials(self._variant_name, type_name)
|
||||
|
||||
#Convert to quality profile if we have one of the built-in profiles, otherwise convert to a quality-changes profile.
|
||||
if has_machine_qualities:
|
||||
material_name_in_quality = VersionUpgrade21to22.VersionUpgrade21to22.VersionUpgrade21to22.translateMaterialForProfiles(self._active_material_name)
|
||||
variant_name_in_quality = VersionUpgrade21to22.VersionUpgrade21to22.translateVariantForProfiles(self._variant_name)
|
||||
if self._active_profile_name in VersionUpgrade21to22.VersionUpgrade21to22.builtInProfiles(): #This is a built-in profile name. Convert to quality.
|
||||
quality_name = VersionUpgrade21to22.VersionUpgrade21to22.translateProfile(self._active_profile_name)
|
||||
else:
|
||||
quality_name = "normal" #We have a quality-changes profile. Base it on normal, since we have no information to indicate which one it should be based on.
|
||||
if self._active_material_name == "PLA" and self._type_name == "ultimaker2plus": #UM2+ uses a different naming scheme for PLA profiles.
|
||||
active_quality = material_name_in_quality + "_" + variant_name_in_quality + "_" + quality_name
|
||||
else:
|
||||
printer_name_in_quality = VersionUpgrade21to22.VersionUpgrade21to22.translatePrinterForProfile(self._type_name)
|
||||
active_quality = printer_name_in_quality + "_" + material_name_in_quality + "_" + variant_name_in_quality + "_" + quality_name
|
||||
|
||||
if self._active_profile_name in VersionUpgrade21to22.VersionUpgrade21to22.builtInProfiles():
|
||||
active_quality_changes = "empty_quality_changes"
|
||||
else: #No built-in profile. Translate this profile to quality-changes.
|
||||
active_quality_changes = material_name_in_quality + "_" + variant_name_in_quality + "_" + quality_name
|
||||
if self._active_profile_name in VersionUpgrade21to22.VersionUpgrade21to22.builtInProfiles():
|
||||
active_quality = VersionUpgrade21to22.VersionUpgrade21to22.translateProfile(self._active_profile_name)
|
||||
active_quality_changes = "empty_quality_changes"
|
||||
else:
|
||||
if self._active_profile_name in VersionUpgrade21to22.VersionUpgrade21to22.builtInProfiles():
|
||||
active_quality = VersionUpgrade21to22.VersionUpgrade21to22.translateProfile(self._active_profile_name)
|
||||
active_quality_changes = "empty_quality_changes"
|
||||
else:
|
||||
active_quality = "normal"
|
||||
active_quality_changes = self._active_profile_name
|
||||
active_quality = VersionUpgrade21to22.VersionUpgrade21to22.getQualityFallback(type_name, variant, active_material)
|
||||
active_quality_changes = self._active_profile_name
|
||||
|
||||
if has_machine_qualities: #This machine now has machine-quality profiles.
|
||||
active_material += "_" + variant_materials #That means that the profile was split into multiple.
|
||||
current_settings = "empty" #The profile didn't know the definition ID when it was upgraded, so it will have been invalid. Sorry, your current settings are lost now.
|
||||
else:
|
||||
current_settings = self._name + "_current_settings"
|
||||
active_material += "_" + variant_materials
|
||||
|
||||
#Create a new user profile and schedule it to be upgraded.
|
||||
user_profile = configparser.ConfigParser(interpolation = None)
|
||||
user_profile["general"] = {
|
||||
"version": "2",
|
||||
"name": "Current settings",
|
||||
"definition": type_name
|
||||
}
|
||||
user_profile["metadata"] = {
|
||||
"type": "user",
|
||||
"machine": self._name
|
||||
}
|
||||
user_profile["values"] = {}
|
||||
|
||||
version_upgrade_manager = UM.VersionUpgradeManager.VersionUpgradeManager.getInstance()
|
||||
user_storage = os.path.join(UM.Resources.getDataStoragePath(), next(iter(version_upgrade_manager.getStoragePaths("user"))))
|
||||
user_profile_file = os.path.join(user_storage, urllib.parse.quote_plus(self._name) + "_current_settings.inst.cfg")
|
||||
if not os.path.exists(user_storage):
|
||||
os.makedirs(user_storage)
|
||||
with open(user_profile_file, "w", encoding = "utf-8") as file_handle:
|
||||
user_profile.write(file_handle)
|
||||
version_upgrade_manager.upgradeExtraFile(user_storage, urllib.parse.quote_plus(self._name), "user")
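A minimal, runnable sketch of the filename quoting and profile layout used above; the machine name "Ultimaker 2+" and the "fdmprinter" definition are illustrative values, not taken from this commit.

import configparser, io, urllib.parse

machine_name = "Ultimaker 2+"                        # hypothetical machine name
file_name = urllib.parse.quote_plus(machine_name) + "_current_settings.inst.cfg"
print(file_name)                                     # Ultimaker+2%2B_current_settings.inst.cfg

profile = configparser.ConfigParser(interpolation = None)
profile["general"] = {"version": "2", "name": "Current settings", "definition": "fdmprinter"}
profile["metadata"] = {"type": "user", "machine": machine_name}
profile["values"] = {}
buffer = io.StringIO()
profile.write(buffer)                                # same serialisation as writing to disk
print(buffer.getvalue())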
|
||||
|
||||
containers = [
|
||||
current_settings,
|
||||
self._name + "_current_settings", #The current profile doesn't know the definition ID when it was upgraded, only the instance ID, so it will be invalid. Sorry, your current settings are lost now.
|
||||
active_quality_changes,
|
||||
active_quality,
|
||||
active_material,
|
||||
|
|
|
@ -5,6 +5,7 @@ import configparser #To read config files.
|
|||
import io #To write config files to strings as if they were files.
|
||||
|
||||
import UM.VersionUpgrade
|
||||
from UM.Logger import Logger
|
||||
|
||||
## Creates a new profile instance by parsing a serialised profile in version 1
|
||||
# of the file format.
|
||||
|
@ -49,7 +50,7 @@ class Profile:
|
|||
self._machine_type_id = parser.get("general", "machine_type", fallback = None)
|
||||
self._machine_variant_name = parser.get("general", "machine_variant", fallback = None)
|
||||
self._machine_instance_name = parser.get("general", "machine_instance", fallback = None)
|
||||
if "material" in parser["general"]:
|
||||
if "material" in parser["general"]: #Note: Material name is unused in this upgrade.
|
||||
self._material_name = parser.get("general", "material")
|
||||
elif self._type == "material":
|
||||
self._material_name = parser.get("general", "name", fallback = None)
|
||||
|
@ -80,7 +81,7 @@ class Profile:
|
|||
import VersionUpgrade21to22 # Import here to prevent circular dependencies.
|
||||
|
||||
if self._name == "Current settings":
|
||||
self._filename += "_current_settings" #This resolves a duplicate ID arising from how Cura 2.1 stores its current settings.
|
||||
return None, None #Can't upgrade these, because the new current profile needs to specify the definition ID and the old file only had the machine instance, not the definition.
|
||||
|
||||
config = configparser.ConfigParser(interpolation = None)
|
||||
|
||||
|
@ -94,10 +95,8 @@ class Profile:
|
|||
config.set("general", "definition", "fdmprinter") #In this case, the machine definition is unknown, and it might now have machine-specific profiles, in which case this will fail.
|
||||
|
||||
config.add_section("metadata")
|
||||
if self._type:
|
||||
config.set("metadata", "type", self._type)
|
||||
else:
|
||||
config.set("metadata", "type", "quality")
|
||||
config.set("metadata", "quality_type", "normal") #This feature doesn't exist in 2.1 yet, so we don't know the actual quality type. For now, always base it on normal.
|
||||
config.set("metadata", "type", "quality_changes")
|
||||
if self._weight:
|
||||
config.set("metadata", "weight", str(self._weight))
|
||||
if self._machine_variant_name:
|
||||
|
@ -107,13 +106,13 @@ class Profile:
|
|||
config.set("metadata", "variant", self._machine_variant_name)
|
||||
|
||||
if self._settings:
|
||||
VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._settings)
|
||||
self._settings = VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._settings)
|
||||
config.add_section("values")
|
||||
for key, value in self._settings.items():
|
||||
config.set("values", key, str(value))
|
||||
|
||||
if self._changed_settings_defaults:
|
||||
VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._changed_settings_defaults)
|
||||
self._changed_settings_defaults = VersionUpgrade21to22.VersionUpgrade21to22.translateSettings(self._changed_settings_defaults)
|
||||
config.add_section("defaults")
|
||||
for key, value in self._changed_settings_defaults.items():
|
||||
config.set("defaults", key, str(value))
|
||||
|
@ -126,34 +125,6 @@ class Profile:
|
|||
for item in disabled_settings_defaults[1:]:
|
||||
disabled_defaults_string += "," + str(item)
|
||||
|
||||
#Material metadata may cause the file to split, so do it last to minimise processing time (do more with the copy).
|
||||
filenames = []
|
||||
configs = []
|
||||
if self._material_name and self._type != "material":
|
||||
config.set("metadata", "material", self._material_name)
|
||||
filenames.append(self._filename)
|
||||
configs.append(config)
|
||||
elif self._type != "material" and self._machine_type_id in VersionUpgrade21to22.VersionUpgrade21to22.VersionUpgrade21to22.machinesWithMachineQuality():
|
||||
#Split this profile into multiple profiles, one for each material.
|
||||
_new_materials = VersionUpgrade21to22.VersionUpgrade21to22.machinesWithMachineQuality()[self._machine_type_id]["materials"]
|
||||
_new_variants = VersionUpgrade21to22.VersionUpgrade21to22.machinesWithMachineQuality()[self._machine_type_id]["variants"]
|
||||
translated_machine = VersionUpgrade21to22.VersionUpgrade21to22.translatePrinter(self._machine_type_id)
|
||||
for material_id in _new_materials:
|
||||
for variant_id in _new_variants:
|
||||
variant_id_new = VersionUpgrade21to22.VersionUpgrade21to22.translateVariant(variant_id, translated_machine)
|
||||
filenames.append("{profile}_{material}_{variant}".format(profile = self._filename, material = material_id, variant = variant_id_new))
|
||||
config_copy = configparser.ConfigParser(interpolation = None)
|
||||
config_copy.read_dict(config) #Copy the config to a new ConfigParser instance.
|
||||
variant_id_new_materials = VersionUpgrade21to22.VersionUpgrade21to22.translateVariantForMaterials(variant_id, translated_machine)
|
||||
config_copy.set("metadata", "material", "{material}_{variant}".format(material = material_id, variant = variant_id_new_materials))
|
||||
configs.append(config_copy)
|
||||
else:
|
||||
configs.append(config)
|
||||
filenames.append(self._filename)
|
||||
|
||||
outputs = []
|
||||
for config in configs:
|
||||
output = io.StringIO()
|
||||
config.write(output)
|
||||
outputs.append(output.getvalue())
|
||||
return filenames, outputs
|
||||
output = io.StringIO()
|
||||
config.write(output)
|
||||
return [self._filename], [output.getvalue()]
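A small illustration of the fan-out naming used when a profile is split per material and variant; the profile, material and variant strings here are hypothetical, and the real code first runs the variant IDs through translateVariant().

profile = "My Profile"
materials = ["generic_abs", "generic_pla"]
variants = ["0.25", "0.4"]
filenames = ["{profile}_{material}_{variant}".format(profile = profile, material = m, variant = v)
             for m in materials for v in variants]
print(filenames)
# ['My Profile_generic_abs_0.25', 'My Profile_generic_abs_0.4',
#  'My Profile_generic_pla_0.25', 'My Profile_generic_pla_0.4']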
|
|
@ -65,11 +65,63 @@ _printer_translations_profiles = {
|
|||
}
|
||||
|
||||
## How to translate profile names from the old version to the new.
|
||||
#
|
||||
# This must have an entry for every built-in profile, since it also serves
|
||||
# as a set for which profiles were built-in.
|
||||
_profile_translations = {
|
||||
"Low Quality": "low",
|
||||
"Normal Quality": "normal",
|
||||
"High Quality": "high",
|
||||
"Ulti Quality": "high" #This one doesn't have an equivalent. Map it to high.
|
||||
"Ulti Quality": "high", #This one doesn't have an equivalent. Map it to high.
|
||||
"abs_0.25_normal": "um2p_abs_0.25_normal",
|
||||
"abs_0.4_fast": "um2p_abs_0.4_fast",
|
||||
"abs_0.4_high": "um2p_abs_0.4_high",
|
||||
"abs_0.4_normal": "um2p_abs_0.4_normal",
|
||||
"abs_0.6_normal": "um2p_abs_0.6_normal",
|
||||
"abs_0.8_normal": "um2p_abs_0.8_normal",
|
||||
"cpe_0.25_normal": "um2p_cpe_0.25_normal",
|
||||
"cpe_0.4_fast": "um2p_cpe_0.4_fast",
|
||||
"cpe_0.4_high": "um2p_cpe_0.4_high",
|
||||
"cpe_0.4_normal": "um2p_cpe_0.4_normal",
|
||||
"cpe_0.6_normal": "um2p_cpe_0.6_normal",
|
||||
"cpe_0.8_normal": "um2p_cpe_0.8_normal",
|
||||
"cpep_0.4_draft": "um2p_cpep_0.4_draft",
|
||||
"cpep_0.4_normal": "um2p_cpep_0.4_normal",
|
||||
"cpep_0.6_draft": "um2p_cpep_0.6_draft",
|
||||
"cpep_0.6_normal": "um2p_cpep_0.6_normal",
|
||||
"cpep_0.8_draft": "um2p_cpep_0.8_draft",
|
||||
"cpep_0.8_normal": "um2p_cpep_0.8_normal",
|
||||
"nylon_0.25_high": "um2p_nylon_0.25_high",
|
||||
"nylon_0.25_normal": "um2p_nylon_0.25_normal",
|
||||
"nylon_0.4_fast": "um2p_nylon_0.4_fast",
|
||||
"nylon_0.4_normal": "um2p_nylon_0.4_normal",
|
||||
"nylon_0.6_fast": "um2p_nylon_0.6_fast",
|
||||
"nylon_0.6_normal": "um2p_nylon_0.6_normal",
|
||||
"nylon_0.8_draft": "um2p_nylon_0.8_draft",
|
||||
"nylon_0.8_normal": "um2p_nylon_0.8_normal",
|
||||
"pc_0.25_high": "um2p_pc_0.25_high",
|
||||
"pc_0.25_normal": "um2p_pc_0.25_normal",
|
||||
"pc_0.4_fast": "um2p_pc_0.4_fast",
|
||||
"pc_0.4_normal": "um2p_pc_0.4_normal",
|
||||
"pc_0.6_fast": "um2p_pc_0.6_fast",
|
||||
"pc_0.6_normal": "um2p_pc_0.6_normal",
|
||||
"pc_0.8_draft": "um2p_pc_0.8_draft",
|
||||
"pc_0.8_normal": "um2p_pc_0.8_normal",
|
||||
"pla_0.25_normal": "pla_0.25_normal", #Note that the PLA profiles don't get the um2p_ prefix, though they are for UM2+.
|
||||
"pla_0.4_fast": "pla_0.4_fast",
|
||||
"pla_0.4_high": "pla_0.4_high",
|
||||
"pla_0.4_normal": "pla_0.4_normal",
|
||||
"pla_0.6_normal": "pla_0.6_normal",
|
||||
"pla_0.8_normal": "pla_0.8_normal",
|
||||
"tpu_0.25_high": "um2p_tpu_0.25_high",
|
||||
"tpu_0.4_normal": "um2p_tpu_0.4_normal",
|
||||
"tpu_0.6_fast": "um2p_tpu_0.6_fast"
|
||||
}
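A sketch of how a translation table like the one above can double as the set of built-in profile names; builtInProfiles() itself is not part of this hunk, so this is illustrative only.

_translations = {
    "Low Quality": "low",
    "Normal Quality": "normal",
    "High Quality": "high",
}

def is_built_in(profile_name):
    # Membership in the table marks the profile as built-in.
    return profile_name in _translations

def translate(profile_name):
    # Custom (non-built-in) profiles keep their original name.
    return _translations.get(profile_name, profile_name)

assert is_built_in("Normal Quality")
assert translate("Low Quality") == "low"
assert translate("My Custom Profile") == "My Custom Profile"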
|
||||
|
||||
## Settings that are no longer in the new version.
|
||||
_removed_settings = {
|
||||
"fill_perimeter_gaps",
|
||||
"support_area_smoothing"
|
||||
}
|
||||
|
||||
## How to translate setting names from the old version to the new.
|
||||
|
@ -78,6 +130,7 @@ _setting_name_translations = {
|
|||
"remove_overlapping_walls_enabled": "travel_compensate_overlapping_walls_enabled",
|
||||
"remove_overlapping_walls_x_enabled": "travel_compensate_overlapping_walls_x_enabled",
|
||||
"retraction_hop": "retraction_hop_enabled",
|
||||
"skin_overlap": "infill_overlap",
|
||||
"skirt_line_width": "skirt_brim_line_width",
|
||||
"skirt_minimal_length": "skirt_brim_minimal_length",
|
||||
"skirt_speed": "skirt_brim_speed",
|
||||
|
@ -91,6 +144,54 @@ _setting_name_translations = {
|
|||
"support_roof_pattern": "support_interface_pattern"
|
||||
}
|
||||
|
||||
## Custom profiles become quality_changes. This dictates which quality to base
|
||||
# the quality_changes profile on.
|
||||
#
|
||||
# Which quality profile to base the quality_changes on depends on the machine,
|
||||
# material and nozzle.
|
||||
#
|
||||
# If a current configuration is missing, fall back to "normal".
|
||||
_quality_fallbacks = {
|
||||
"ultimaker2_plus": {
|
||||
"ultimaker2_plus_0.25": {
|
||||
"generic_abs": "um2p_abs_0.25_normal",
|
||||
"generic_cpe": "um2p_cpe_0.25_normal",
|
||||
#No CPE+.
|
||||
"generic_nylon": "um2p_nylon_0.25_normal",
|
||||
"generic_pc": "um2p_pc_0.25_normal",
|
||||
"generic_pla": "pla_0.25_normal",
|
||||
"generic_tpu": "um2p_tpu_0.25_high"
|
||||
},
|
||||
"ultimaker2_plus_0.4": {
|
||||
"generic_abs": "um2p_abs_0.4_normal",
|
||||
"generic_cpe": "um2p_cpe_0.4_normal",
|
||||
"generic_cpep": "um2p_cpep_0.4_normal",
|
||||
"generic_nylon": "um2p_nylon_0.4_normal",
|
||||
"generic_pc": "um2p_pc_0.4_normal",
|
||||
"generic_pla": "pla_0.4_normal",
|
||||
"generic_tpu": "um2p_tpu_0.4_normal"
|
||||
},
|
||||
"ultimaker2_plus_0.6": {
|
||||
"generic_abs": "um2p_abs_0.6_normal",
|
||||
"generic_cpe": "um2p_cpe_0.6_normal",
|
||||
"generic_cpep": "um2p_cpep_0.6_normal",
|
||||
"generic_nylon": "um2p_nylon_0.6_normal",
|
||||
"generic_pc": "um2p_pc_0.6_normal",
|
||||
"generic_pla": "pla_0.6_normal",
|
||||
"generic_tpu": "um2p_tpu_0.6_fast",
|
||||
},
|
||||
"ultimaker2_plus_0.8": {
|
||||
"generic_abs": "um2p_abs_0.8_normal",
|
||||
"generic_cpe": "um2p_cpe_0.8_normal",
|
||||
"generic_cpep": "um2p_cpep_0.8_normal",
|
||||
"generic_nylon": "um2p_nylon_0.8_normal",
|
||||
"generic_pc": "um2p_pc_0.8_normal",
|
||||
"generic_pla": "pla_0.8_normal",
|
||||
#No TPU.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
## How to translate variants of specific machines from the old version to the
|
||||
# new.
|
||||
_variant_translations = {
|
||||
|
@ -150,6 +251,25 @@ class VersionUpgrade21to22(VersionUpgrade):
|
|||
parser.read_string(serialised)
|
||||
return int(parser.get("general", "version")) #Explicitly give an exception when this fails. That means that the file format is not recognised.
|
||||
|
||||
## Gets the fallback quality to use for a specific machine-variant-material
|
||||
# combination.
|
||||
#
|
||||
# For custom profiles we fall back onto this quality profile, since we
|
||||
# don't know which quality profile it was based on.
|
||||
#
|
||||
# \param machine The machine ID of the user's configuration in 2.2.
|
||||
# \param variant The variant ID of the user's configuration in 2.2.
|
||||
# \param material The material ID of the user's configuration in 2.2.
|
||||
@staticmethod
|
||||
def getQualityFallback(machine, variant, material):
|
||||
if machine not in _quality_fallbacks:
|
||||
return "normal"
|
||||
if variant not in _quality_fallbacks[machine]:
|
||||
return "normal"
|
||||
if material not in _quality_fallbacks[machine][variant]:
|
||||
return "normal"
|
||||
return _quality_fallbacks[machine][variant][material]
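Illustrative lookup behaviour of getQualityFallback(), using a trimmed copy of the table above; any machine, variant or material that is missing falls back to "normal".

fallbacks = {
    "ultimaker2_plus": {
        "ultimaker2_plus_0.4": {"generic_pla": "pla_0.4_normal"},
    },
}

def quality_fallback(machine, variant, material):
    return fallbacks.get(machine, {}).get(variant, {}).get(material, "normal")

assert quality_fallback("ultimaker2_plus", "ultimaker2_plus_0.4", "generic_pla") == "pla_0.4_normal"
assert quality_fallback("ultimaker2_plus", "ultimaker2_plus_0.8", "generic_tpu") == "normal"  # no TPU entry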
|
||||
|
||||
## Gets the set of built-in profile names in Cura 2.1.
|
||||
#
|
||||
# This is required to test if profiles should be converted to a quality
|
||||
|
@ -271,15 +391,21 @@ class VersionUpgrade21to22(VersionUpgrade):
|
|||
# \return The same dictionary.
|
||||
@staticmethod
|
||||
def translateSettings(settings):
|
||||
new_settings = {}
|
||||
for key, value in settings.items():
|
||||
if key == "fill_perimeter_gaps": #Setting is removed.
|
||||
del settings[key]
|
||||
elif key == "retraction_combing": #Combing was made into an enum instead of a boolean.
|
||||
settings[key] = "off" if (value == "False") else "all"
|
||||
elif key in _setting_name_translations:
|
||||
del settings[key]
|
||||
settings[_setting_name_translations[key]] = value
|
||||
return settings
|
||||
if key in _removed_settings:
|
||||
continue
|
||||
if key == "retraction_combing": #Combing was made into an enum instead of a boolean.
|
||||
new_settings[key] = "off" if (value == "False") else "all"
|
||||
continue
|
||||
if key == "cool_fan_full_layer": #Layer counting was made one-indexed.
|
||||
new_settings[key] = str(int(value) + 1)
|
||||
continue
|
||||
if key in _setting_name_translations:
|
||||
new_settings[_setting_name_translations[key]] = value
|
||||
continue
|
||||
new_settings[key] = value
|
||||
return new_settings
|
||||
|
||||
## Translates a setting name for the change from Cura 2.1 to 2.2.
|
||||
#
|
||||
|
|
|
@ -35,6 +35,10 @@ def getMetaData():
|
|||
"preferences": {
|
||||
"get_version": upgrade.getCfgVersion,
|
||||
"location": {"."}
|
||||
},
|
||||
"user": {
|
||||
"get_version": upgrade.getCfgVersion,
|
||||
"location": {"./user"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
916
plugins/X3DReader/X3DReader.py
Normal file
|
@ -0,0 +1,916 @@
|
|||
# Contributed by Seva Alekseyev <sevaa@nih.gov> with National Institutes of Health, 2016
|
||||
# Cura is released under the terms of the AGPLv3 or higher.
|
||||
|
||||
from UM.Mesh.MeshReader import MeshReader
|
||||
from UM.Mesh.MeshBuilder import MeshBuilder
|
||||
from UM.Logger import Logger
|
||||
from UM.Math.Matrix import Matrix
|
||||
from UM.Math.Vector import Vector
|
||||
from UM.Scene.SceneNode import SceneNode
|
||||
from UM.Job import Job
|
||||
from math import pi, sin, cos, sqrt
|
||||
import numpy
|
||||
|
||||
try:
|
||||
import xml.etree.cElementTree as ET
|
||||
except ImportError:
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
# TODO: preserve the structure of scenes that contain several objects
|
||||
# Use CADPart, for example, to distinguish between separate objects
|
||||
|
||||
DEFAULT_SUBDIV = 16 # Default subdivision factor for spheres, cones, and cylinders
|
||||
EPSILON = 0.000001
|
||||
|
||||
class Shape:
|
||||
|
||||
# Expects verts in MeshBuilder-ready format, as an n by 3 ndarray
|
||||
# with vertices stored in rows
|
||||
def __init__(self, verts, faces, index_base, name):
|
||||
self.verts = verts
|
||||
self.faces = faces
|
||||
# Those are here for debugging purposes only
|
||||
self.index_base = index_base
|
||||
self.name = name
|
||||
|
||||
class X3DReader(MeshReader):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._supported_extensions = [".x3d"]
|
||||
self._namespaces = {}
|
||||
|
||||
# Main entry point
|
||||
# Reads the file, returns a SceneNode (possibly with nested ones), or None
|
||||
def read(self, file_name):
|
||||
try:
|
||||
self.defs = {}
|
||||
self.shapes = []
|
||||
|
||||
tree = ET.parse(file_name)
|
||||
xml_root = tree.getroot()
|
||||
|
||||
if xml_root.tag != "X3D":
|
||||
return None
|
||||
|
||||
scale = 1000 # Default X3D unit is one meter, while Cura's is one millimeter
|
||||
if xml_root[0].tag == "head":
|
||||
for head_node in xml_root[0]:
|
||||
if head_node.tag == "unit" and head_node.attrib.get("category") == "length":
|
||||
scale *= float(head_node.attrib["conversionFactor"])
|
||||
break
|
||||
xml_scene = xml_root[1]
|
||||
else:
|
||||
xml_scene = xml_root[0]
|
||||
|
||||
if xml_scene.tag != "Scene":
|
||||
return None
|
||||
|
||||
self.transform = Matrix()
|
||||
self.transform.setByScaleFactor(scale)
|
||||
self.index_base = 0
|
||||
|
||||
# Traverse the scene tree, populate the shapes list
|
||||
self.processChildNodes(xml_scene)
|
||||
|
||||
if self.shapes:
|
||||
builder = MeshBuilder()
|
||||
builder.setVertices(numpy.concatenate([shape.verts for shape in self.shapes]))
|
||||
builder.setIndices(numpy.concatenate([shape.faces for shape in self.shapes]))
|
||||
builder.calculateNormals()
|
||||
builder.setFileName(file_name)
|
||||
mesh_data = builder.build()
|
||||
|
||||
# Manually try and get the extents of the mesh_data. This should prevent nasty NaN issues from
|
||||
# leaving the reader.
|
||||
mesh_data.getExtents()
|
||||
|
||||
node = SceneNode()
|
||||
node.setMeshData(mesh_data)
|
||||
node.setSelectable(True)
|
||||
node.setName(file_name)
|
||||
|
||||
else:
|
||||
return None
|
||||
|
||||
except Exception:
|
||||
Logger.logException("e", "Exception in X3D reader")
|
||||
return None
|
||||
|
||||
return node
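Unit handling in read() boils down to one multiplication; a sketch with a hypothetical conversionFactor of 0.001, i.e. a file that stores its lengths in millimetres.

metres_to_mm = 1000          # X3D lengths default to metres, Cura works in millimetres
conversion_factor = 0.001    # from the file's <unit category="length" conversionFactor="..."/>
scale = metres_to_mm * conversion_factor
print(scale)                 # 1.0, so such a file's coordinates pass through unchanged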
|
||||
|
||||
# ------------------------- XML tree traversal
|
||||
|
||||
def processNode(self, xml_node):
|
||||
xml_node = self.resolveDefUse(xml_node)
|
||||
if xml_node is None:
|
||||
return
|
||||
|
||||
tag = xml_node.tag
|
||||
if tag in ("Group", "StaticGroup", "CADAssembly", "CADFace", "CADLayer", "Collision"):
|
||||
self.processChildNodes(xml_node)
|
||||
if tag == "CADPart":
|
||||
self.processTransform(xml_node) # TODO: split the parts
|
||||
elif tag == "LOD":
|
||||
self.processNode(xml_node[0])
|
||||
elif tag == "Transform":
|
||||
self.processTransform(xml_node)
|
||||
elif tag == "Shape":
|
||||
self.processShape(xml_node)
|
||||
|
||||
|
||||
def processShape(self, xml_node):
|
||||
# Find the geometry and the appearance inside the Shape
|
||||
geometry = appearance = None
|
||||
for sub_node in xml_node:
|
||||
if sub_node.tag == "Appearance" and not appearance:
|
||||
appearance = self.resolveDefUse(sub_node)
|
||||
elif sub_node.tag in self.geometry_importers and not geometry:
|
||||
geometry = self.resolveDefUse(sub_node)
|
||||
|
||||
# TODO: appearance is completely ignored. At least apply the material color...
|
||||
if not geometry is None:
|
||||
try:
|
||||
self.verts = self.faces = [] # Safeguard
|
||||
self.geometry_importers[geometry.tag](self, geometry)
|
||||
m = self.transform.getData()
|
||||
verts = m.dot(self.verts)[:3].transpose()
|
||||
|
||||
self.shapes.append(Shape(verts, self.faces, self.index_base, geometry.tag))
|
||||
self.index_base += len(verts)
|
||||
|
||||
except Exception:
|
||||
Logger.logException("e", "Exception in X3D reader while reading %s", geometry.tag)
|
||||
|
||||
# Returns the referenced node if the node has USE, the same node otherwise.
|
||||
# May return None if USE points at a nonexistent node
|
||||
# In X3DOM, when both DEF and USE are in the same node, DEF is ignored.
|
||||
# Big caveat: XML element objects may evaluate to boolean False!!!
|
||||
# Don't ever use "if node:", use "if not node is None:" instead
|
||||
def resolveDefUse(self, node):
|
||||
USE = node.attrib.get("USE")
|
||||
if USE:
|
||||
return self.defs.get(USE, None)
|
||||
|
||||
DEF = node.attrib.get("DEF")
|
||||
if DEF:
|
||||
self.defs[DEF] = node
|
||||
return node
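A minimal DEF/USE illustration with plain ElementTree; the resolve() helper is a stripped-down stand-in for resolveDefUse() above, and, as in the reader, a USE can only resolve to a DEF seen earlier in traversal order.

import xml.etree.ElementTree as ET

scene = ET.fromstring(
    '<Scene>'
    '<Shape><Box DEF="MyBox" size="1 1 1"/></Shape>'
    '<Shape><Box USE="MyBox"/></Shape>'
    '</Scene>')

defs = {}

def resolve(node):
    use = node.attrib.get("USE")
    if use:
        return defs.get(use)      # may be None if the DEF is missing
    name = node.attrib.get("DEF")
    if name:
        defs[name] = node
    return node

boxes = scene.findall(".//Box")
assert resolve(boxes[0]) is resolve(boxes[1])   # the USE points back at the DEF'd element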
|
||||
|
||||
def processChildNodes(self, node):
|
||||
for c in node:
|
||||
self.processNode(c)
|
||||
Job.yieldThread()
|
||||
|
||||
# Since this is a grouping node, this will recurse down the tree.
|
||||
# According to the spec, the final transform matrix is:
|
||||
# T * C * R * SR * S * -SR * -C
|
||||
# Where SR corresponds to the rotation matrix to scaleOrientation
|
||||
# C and SR are rather exotic. S, slightly less so.
|
||||
def processTransform(self, node):
|
||||
rot = readRotation(node, "rotation", (0, 0, 1, 0)) # (angle, axisVector) tuple
|
||||
trans = readVector(node, "translation", (0, 0, 0)) # Vector
|
||||
scale = readVector(node, "scale", (1, 1, 1)) # Vector
|
||||
center = readVector(node, "center", (0, 0, 0)) # Vector
|
||||
scale_orient = readRotation(node, "scaleOrientation", (0, 0, 1, 0)) # (angle, axisVector) tuple
|
||||
|
||||
# Store the previous transform; in Cura, the default matrix multiplication is in place
|
||||
prev = Matrix(self.transform.getData()) # It's a deep copy, I've checked
|
||||
|
||||
# The rest of transform manipulation will be applied in place
|
||||
got_center = (center.x != 0 or center.y != 0 or center.z != 0)
|
||||
|
||||
T = self.transform
|
||||
if trans.x != 0 or trans.y != 0 or trans.z !=0:
|
||||
T.translate(trans)
|
||||
if got_center:
|
||||
T.translate(center)
|
||||
if rot[0] != 0:
|
||||
T.rotateByAxis(*rot)
|
||||
if scale.x != 1 or scale.y != 1 or scale.z != 1:
|
||||
got_scale_orient = scale_orient[0] != 0
|
||||
if got_scale_orient:
|
||||
T.rotateByAxis(*scale_orient)
|
||||
# No scale by vector in place operation in UM
|
||||
S = Matrix()
|
||||
S.setByScaleVector(scale)
|
||||
T.multiply(S)
|
||||
if got_scale_orient:
|
||||
T.rotateByAxis(-scale_orient[0], scale_orient[1])
|
||||
if got_center:
|
||||
T.translate(-center)
|
||||
|
||||
self.processChildNodes(node)
|
||||
self.transform = prev
|
||||
|
||||
# ------------------------- Geometry importers
|
||||
# They are supposed to fill the self.verts and self.faces arrays, the caller will do the rest
|
||||
|
||||
# Primitives
|
||||
|
||||
def processGeometryBox(self, node):
|
||||
(dx, dy, dz) = readFloatArray(node, "size", [2, 2, 2])
|
||||
dx /= 2
|
||||
dy /= 2
|
||||
dz /= 2
|
||||
self.reserveFaceAndVertexCount(12, 8)
|
||||
|
||||
# xz plane at +y, ccw
|
||||
self.addVertex(dx, dy, dz)
|
||||
self.addVertex(-dx, dy, dz)
|
||||
self.addVertex(-dx, dy, -dz)
|
||||
self.addVertex(dx, dy, -dz)
|
||||
# xz plane at -y
|
||||
self.addVertex(dx, -dy, dz)
|
||||
self.addVertex(-dx, -dy, dz)
|
||||
self.addVertex(-dx, -dy, -dz)
|
||||
self.addVertex(dx, -dy, -dz)
|
||||
|
||||
self.addQuad(0, 1, 2, 3) # +y
|
||||
self.addQuad(4, 0, 3, 7) # +x
|
||||
self.addQuad(7, 3, 2, 6) # -z
|
||||
self.addQuad(6, 2, 1, 5) # -x
|
||||
self.addQuad(5, 1, 0, 4) # +z
|
||||
self.addQuad(7, 6, 5, 4) # -y
|
||||
|
||||
# The sphere is subdivided into nr rings and ns segments
|
||||
def processGeometrySphere(self, node):
|
||||
r = readFloat(node, "radius", 0.5)
|
||||
subdiv = readIntArray(node, "subdivision", None)
|
||||
if subdiv:
|
||||
if len(subdiv) == 1:
|
||||
nr = ns = subdiv[0]
|
||||
else:
|
||||
(nr, ns) = subdiv
|
||||
else:
|
||||
nr = ns = DEFAULT_SUBDIV
|
||||
|
||||
lau = pi / nr # Unit angle of latitude (rings) for the given tessellation
|
||||
lou = 2 * pi / ns # Unit angle of longitude (segments)
|
||||
|
||||
self.reserveFaceAndVertexCount(ns*(nr*2 - 2), 2 + (nr - 1)*ns)
|
||||
|
||||
# +y and -y poles
|
||||
self.addVertex(0, r, 0)
|
||||
self.addVertex(0, -r, 0)
|
||||
|
||||
# The non-polar vertices go from x=0, negative z plane counterclockwise -
|
||||
# to -x, to +z, to +x, back to -z
|
||||
for ring in range(1, nr):
|
||||
for seg in range(ns):
|
||||
self.addVertex(-r*sin(lou * seg) * sin(lau * ring),
|
||||
r*cos(lau * ring),
|
||||
-r*cos(lou * seg) * sin(lau * ring))
|
||||
|
||||
vb = 2 + (nr - 2) * ns # First vertex index for the bottom cap
|
||||
|
||||
# Faces go in order: top cap, sides, bottom cap.
|
||||
# Sides go by ring then by segment.
|
||||
|
||||
# Caps
|
||||
# Top cap face vertices go in order: down right up
|
||||
# (starting from +y pole)
|
||||
# Bottom cap goes: up left down (starting from -y pole)
|
||||
for seg in range(ns):
|
||||
self.addTri(0, seg + 2, (seg + 1) % ns + 2)
|
||||
self.addTri(1, vb + (seg + 1) % ns, vb + seg)
|
||||
|
||||
# Sides
|
||||
# Side face vertices go in order: down right upleft, downright up left
|
||||
for ring in range(nr - 2):
|
||||
tvb = 2 + ring * ns
|
||||
# First vertex index for the top edge of the ring
|
||||
bvb = tvb + ns
|
||||
# First vertex index for the bottom edge of the ring
|
||||
for seg in range(ns):
|
||||
nseg = (seg + 1) % ns
|
||||
self.addQuad(tvb + seg, bvb + seg, bvb + nseg, tvb + nseg)
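A quick count check for the reserveFaceAndVertexCount() call above, using the default subdivision of 16.

nr = ns = 16
num_verts = 2 + (nr - 1) * ns            # two poles plus (nr - 1) rings of ns vertices
num_faces = ns * (nr * 2 - 2)            # 2*ns cap triangles plus 2*ns*(nr - 2) side triangles
assert num_faces == 2 * ns + 2 * ns * (nr - 2)
print(num_verts, num_faces)              # 242 480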
|
||||
|
||||
def processGeometryCone(self, node):
|
||||
r = readFloat(node, "bottomRadius", 1)
|
||||
height = readFloat(node, "height", 2)
|
||||
bottom = readBoolean(node, "bottom", True)
|
||||
side = readBoolean(node, "side", True)
|
||||
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
|
||||
|
||||
d = height / 2
|
||||
angle = 2 * pi / n
|
||||
|
||||
self.reserveFaceAndVertexCount((n if side else 0) + (n-2 if bottom else 0), n+1)
|
||||
|
||||
# Vertex 0 is the apex, vertices 1..n are the bottom
|
||||
self.addVertex(0, d, 0)
|
||||
for i in range(n):
|
||||
self.addVertex(-r * sin(angle * i), -d, -r * cos(angle * i))
|
||||
|
||||
# Side face vertices go: up down right
|
||||
if side:
|
||||
for i in range(n):
|
||||
self.addTri(1 + (i + 1) % n, 0, 1 + i)
|
||||
if bottom:
|
||||
for i in range(2, n):
|
||||
self.addTri(1, i, i+1)
|
||||
|
||||
def processGeometryCylinder(self, node):
|
||||
r = readFloat(node, "radius", 1)
|
||||
height = readFloat(node, "height", 2)
|
||||
bottom = readBoolean(node, "bottom", True)
|
||||
side = readBoolean(node, "side", True)
|
||||
top = readBoolean(node, "top", True)
|
||||
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
|
||||
|
||||
nn = n * 2
|
||||
angle = 2 * pi / n
|
||||
hh = height/2
|
||||
|
||||
self.reserveFaceAndVertexCount((nn if side else 0) + (n - 2 if top else 0) + (n - 2 if bottom else 0), nn)
|
||||
|
||||
# The seam is at x=0, z=-r, vertices go ccw -
|
||||
# to pos x, to neg z, to neg x, back to neg z
|
||||
for i in range(n):
|
||||
rs = -r * sin(angle * i)
|
||||
rc = -r * cos(angle * i)
|
||||
self.addVertex(rs, hh, rc)
|
||||
self.addVertex(rs, -hh, rc)
|
||||
|
||||
if side:
|
||||
for i in range(n):
|
||||
ni = (i + 1) % n
|
||||
self.addQuad(ni * 2 + 1, ni * 2, i * 2, i * 2 + 1)
|
||||
|
||||
for i in range(2, nn-3, 2):
|
||||
if top:
|
||||
self.addTri(0, i, i+2)
|
||||
if bottom:
|
||||
self.addTri(1, i+1, i+3)
|
||||
|
||||
# Semi-primitives
|
||||
|
||||
def processGeometryElevationGrid(self, node):
|
||||
dx = readFloat(node, "xSpacing", 1)
|
||||
dz = readFloat(node, "zSpacing", 1)
|
||||
nx = readInt(node, "xDimension", 0)
|
||||
nz = readInt(node, "zDimension", 0)
|
||||
height = readFloatArray(node, "height", False)
|
||||
ccw = readBoolean(node, "ccw", True)
|
||||
|
||||
if nx <= 0 or nz <= 0 or len(height) < nx*nz:
|
||||
return # That's weird, the wording of the standard suggests grids with zero quads are somehow valid
|
||||
|
||||
self.reserveFaceAndVertexCount(2*(nx-1)*(nz-1), nx*nz)
|
||||
|
||||
for z in range(nz):
|
||||
for x in range(nx):
|
||||
self.addVertex(x * dx, height[z*nx + x], z * dz)
|
||||
|
||||
for z in range(1, nz):
|
||||
for x in range(1, nx):
|
||||
self.addTriFlip((z - 1)*nx + x - 1, z*nx + x, (z - 1)*nx + x, ccw)
|
||||
self.addTriFlip((z - 1)*nx + x - 1, z*nx + x - 1, z*nx + x, ccw)
|
||||
|
||||
def processGeometryExtrusion(self, node):
|
||||
ccw = readBoolean(node, "ccw", True)
|
||||
begin_cap = readBoolean(node, "beginCap", True)
|
||||
end_cap = readBoolean(node, "endCap", True)
|
||||
cross = readFloatArray(node, "crossSection", (1, 1, 1, -1, -1, -1, -1, 1, 1, 1))
|
||||
cross = [(cross[i], cross[i+1]) for i in range(0, len(cross), 2)]
|
||||
spine = readFloatArray(node, "spine", (0, 0, 0, 0, 1, 0))
|
||||
spine = [(spine[i], spine[i+1], spine[i+2]) for i in range(0, len(spine), 3)]
|
||||
orient = readFloatArray(node, "orientation", None)
|
||||
if orient:
|
||||
# This converts X3D's axis/angle rotation to a 3x3 numpy matrix
|
||||
def toRotationMatrix(rot):
|
||||
(x, y, z) = rot[:3]
|
||||
a = rot[3]
|
||||
s = sin(a)
|
||||
c = cos(a)
|
||||
t = 1-c
|
||||
return numpy.array((
|
||||
(x * x * t + c, x * y * t - z*s, x * z * t + y * s),
|
||||
(x * y * t + z*s, y * y * t + c, y * z * t - x * s),
|
||||
(x * z * t - y * s, y * z * t + x * s, z * z * t + c)))
|
||||
|
||||
orient = [toRotationMatrix(orient[i:i+4]) if orient[i+3] != 0 else None for i in range(0, len(orient), 4)]
|
||||
|
||||
scale = readFloatArray(node, "scale", None)
|
||||
if scale:
|
||||
scale = [numpy.array(((scale[i], 0, 0), (0, 1, 0), (0, 0, scale[i+1])))
|
||||
if scale[i] != 1 or scale[i+1] != 1 else None for i in range(0, len(scale), 2)]
|
||||
|
||||
|
||||
# Special treatment for the closed spine and cross section.
|
||||
# Let's save some memory by not creating identical but distinct vertices;
|
||||
# later we'll introduce conditional logic to link the last vertex with
|
||||
# the first one where necessary.
|
||||
crossClosed = cross[0] == cross[-1]
|
||||
if crossClosed:
|
||||
cross = cross[:-1]
|
||||
nc = len(cross)
|
||||
cross = [numpy.array((c[0], 0, c[1])) for c in cross]
|
||||
ncf = nc if crossClosed else nc - 1
|
||||
# Face count along the cross; for closed cross, it's the same as the
|
||||
# respective vertex count
|
||||
|
||||
spine_closed = spine[0] == spine[-1]
|
||||
if spine_closed:
|
||||
spine = spine[:-1]
|
||||
ns = len(spine)
|
||||
spine = [Vector(*s) for s in spine]
|
||||
nsf = ns if spine_closed else ns - 1
|
||||
|
||||
# This will be used for fallback, where the current spine point joins
|
||||
# two collinear spine segments. No need to recheck the case of the
|
||||
# closed spine/last-to-first point juncture; if there's an angle there,
|
||||
# it would kick in on the first iteration of the main loop by spine.
|
||||
def findFirstAngleNormal():
|
||||
for i in range(1, ns - 1):
|
||||
spt = spine[i]
|
||||
z = (spine[i + 1] - spt).cross(spine[i - 1] - spt)
|
||||
if z.length() > EPSILON:
|
||||
return z
|
||||
# All the spine points are collinear. Fall back to the rotated source
|
||||
# XZ plane.
|
||||
# TODO: handle the situation where the first two spine points match
|
||||
if len(spine) < 2:
|
||||
return Vector(0, 0, 1)
|
||||
v = spine[1] - spine[0]
|
||||
orig_y = Vector(0, 1, 0)
|
||||
orig_z = Vector(0, 0, 1)
|
||||
if v.cross(orig_y).length() > EPSILON:
|
||||
# Spine at angle with global y - rotate the z accordingly
|
||||
a = v.cross(orig_y) # Axis of rotation to get to the Z
|
||||
(x, y, z) = a.normalized().getData()
|
||||
s = a.length()/v.length()
|
||||
c = sqrt(1-s*s)
|
||||
t = 1-c
|
||||
m = numpy.array((
|
||||
(x * x * t + c, x * y * t + z*s, x * z * t - y * s),
|
||||
(x * y * t - z*s, y * y * t + c, y * z * t + x * s),
|
||||
(x * z * t + y * s, y * z * t - x * s, z * z * t + c)))
|
||||
orig_z = Vector(*m.dot(orig_z.getData()))
|
||||
return orig_z
|
||||
|
||||
self.reserveFaceAndVertexCount(2*nsf*ncf + (nc - 2 if begin_cap else 0) + (nc - 2 if end_cap else 0), ns*nc)
|
||||
|
||||
z = None
|
||||
for i, spt in enumerate(spine):
|
||||
if (i > 0 and i < ns - 1) or spine_closed:
|
||||
snext = spine[(i + 1) % ns]
|
||||
sprev = spine[(i - 1 + ns) % ns]
|
||||
y = snext - sprev
|
||||
vnext = snext - spt
|
||||
vprev = sprev - spt
|
||||
try_z = vnext.cross(vprev)
|
||||
# Might be zero, then all kinds of fallback
|
||||
if try_z.length() > EPSILON:
|
||||
if z is not None and try_z.dot(z) < 0:
|
||||
try_z = -try_z
|
||||
z = try_z
|
||||
elif not z: # No z, and no previous z.
|
||||
# Look ahead, see if there's at least one point where
|
||||
# spines are not collinear.
|
||||
z = findFirstAngleNormal()
|
||||
elif i == 0: # And non-crossed
|
||||
snext = spine[i + 1]
|
||||
y = snext - spt
|
||||
z = findFirstAngleNormal()
|
||||
else: # last point and not crossed
|
||||
sprev = spine[i - 1]
|
||||
y = spt - sprev
|
||||
# If there's more than one point in the spine, z is already set.
|
||||
# One point in the spine is an error anyway.
|
||||
|
||||
z = z.normalized()
|
||||
y = y.normalized()
|
||||
x = y.cross(z) # Already normalized
|
||||
m = numpy.array(((x.x, y.x, z.x), (x.y, y.y, z.y), (x.z, y.z, z.z)))
|
||||
|
||||
# Columns are the unit vectors for the xz plane for the cross-section
|
||||
if orient:
|
||||
mrot = orient[i] if len(orient) > 1 else orient[0]
|
||||
if not mrot is None:
|
||||
m = m.dot(mrot) # Tested against X3DOM, the result matches, still not sure :(
|
||||
|
||||
if scale:
|
||||
mscale = scale[i] if len(scale) > 1 else scale[0]
|
||||
if not mscale is None:
|
||||
m = m.dot(mscale)
|
||||
|
||||
# First the cross-section 2-vector is scaled,
|
||||
# then rotated (which may make it a 3-vector),
|
||||
# then applied to the xz plane unit vectors
|
||||
|
||||
sptv3 = numpy.array(spt.getData()[:3])
|
||||
for cpt in cross:
|
||||
v = sptv3 + m.dot(cpt)
|
||||
self.addVertex(*v)
|
||||
|
||||
if begin_cap:
|
||||
self.addFace([x for x in range(nc - 1, -1, -1)], ccw)
|
||||
|
||||
# Order of edges in the face: forward along cross, forward along spine,
|
||||
# backward along cross, backward along spine, flipped if now ccw.
|
||||
# This order is assumed later in the texture coordinate assignment;
|
||||
# please don't change without syncing.
|
||||
|
||||
for s in range(ns - 1):
|
||||
for c in range(ncf):
|
||||
self.addQuadFlip(s * nc + c, s * nc + (c + 1) % nc,
|
||||
(s + 1) * nc + (c + 1) % nc, (s + 1) * nc + c, ccw)
|
||||
|
||||
if spine_closed:
|
||||
# The faces between the last and the first spine points
|
||||
b = (ns - 1) * nc
|
||||
for c in range(ncf):
|
||||
self.addQuadFlip(b + c, b + (c + 1) % nc,
|
||||
(c + 1) % nc, c, ccw)
|
||||
|
||||
if end_cap:
|
||||
self.addFace([(ns - 1) * nc + x for x in range(0, nc)], ccw)
|
||||
|
||||
# Triangle meshes
|
||||
|
||||
# Helper for numerous nodes with a Coordinate subnode holding vertices
|
||||
# That covers all the triangle meshes and IndexedFaceSet
|
||||
# num_faces can be a function, in case the face count is a function of vertex count
|
||||
def startCoordMesh(self, node, num_faces):
|
||||
ccw = readBoolean(node, "ccw", True)
|
||||
self.readVertices(node) # This will allocate and fill the vertex array
|
||||
if hasattr(num_faces, "__call__"):
|
||||
num_faces = num_faces(self.getVertexCount())
|
||||
self.reserveFaceCount(num_faces)
|
||||
|
||||
return ccw
|
||||
|
||||
|
||||
def processGeometryIndexedTriangleSet(self, node):
|
||||
index = readIntArray(node, "index", [])
|
||||
num_faces = len(index) // 3
|
||||
ccw = int(self.startCoordMesh(node, num_faces))
|
||||
|
||||
for i in range(0, num_faces*3, 3):
|
||||
self.addTri(index[i + 1 - ccw], index[i + ccw], index[i+2])
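The 0/1 ccw value doubles as an index offset that swaps the first two corners of each triangle; a tiny sketch of the trick.

def tri(index, i, ccw):
    return (index[i + 1 - ccw], index[i + ccw], index[i + 2])

idx = [0, 1, 2]
print(tri(idx, 0, 1))   # (0, 1, 2): counter-clockwise input kept as-is
print(tri(idx, 0, 0))   # (1, 0, 2): clockwise input gets its winding flipped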
|
||||
|
||||
def processGeometryIndexedTriangleStripSet(self, node):
|
||||
strips = readIndex(node, "index")
|
||||
ccw = int(self.startCoordMesh(node, sum([len(strip) - 2 for strip in strips])))
|
||||
|
||||
for strip in strips:
|
||||
sccw = ccw # Running CCW value, reset for each strip
|
||||
for i in range(len(strip) - 2):
|
||||
self.addTri(strip[i + 1 - sccw], strip[i + sccw], strip[i+2])
|
||||
sccw = 1 - sccw
|
||||
|
||||
def processGeometryIndexedTriangleFanSet(self, node):
|
||||
fans = readIndex(node, "index")
|
||||
ccw = int(self.startCoordMesh(node, sum([len(fan) - 2 for fan in fans])))
|
||||
|
||||
for fan in fans:
|
||||
for i in range(1, len(fan) - 1):
|
||||
self.addTri(fan[0], fan[i + 1 - ccw], fan[i + ccw])
|
||||
|
||||
def processGeometryTriangleSet(self, node):
|
||||
ccw = int(self.startCoordMesh(node, lambda num_vert: num_vert // 3))
|
||||
for i in range(0, self.getVertexCount(), 3):
|
||||
self.addTri(i + 1 - ccw, i + ccw, i+2)
|
||||
|
||||
def processGeometryTriangleStripSet(self, node):
|
||||
strips = readIntArray(node, "stripCount", [])
|
||||
ccw = int(self.startCoordMesh(node, sum([n-2 for n in strips])))
|
||||
|
||||
vb = 0
|
||||
for n in strips:
|
||||
sccw = ccw
|
||||
for i in range(n-2):
|
||||
self.addTri(vb + i + 1 - sccw, vb + i + sccw, vb + i + 2)
|
||||
sccw = 1 - sccw
|
||||
vb += n
|
||||
|
||||
def processGeometryTriangleFanSet(self, node):
|
||||
fans = readIntArray(node, "fanCount", [])
|
||||
ccw = int(self.startCoordMesh(node, sum([n-2 for n in fans])))
|
||||
|
||||
vb = 0
|
||||
for n in fans:
|
||||
for i in range(1, n-1):
|
||||
self.addTri(vb, vb + i + 1 - ccw, vb + i + ccw)
|
||||
vb += n
|
||||
|
||||
# Quad geometries from the CAD module, might be relevant for printing
|
||||
|
||||
def processGeometryQuadSet(self, node):
|
||||
ccw = self.startCoordMesh(node, lambda num_vert: 2*(num_vert // 4))
|
||||
for i in range(0, self.getVertexCount(), 4):
|
||||
self.addQuadFlip(i, i+1, i+2, i+3, ccw)
|
||||
|
||||
def processGeometryIndexedQuadSet(self, node):
|
||||
index = readIntArray(node, "index", [])
|
||||
num_quads = len(index) // 4
|
||||
ccw = self.startCoordMesh(node, num_quads*2)
|
||||
|
||||
for i in range(0, num_quads*4, 4):
|
||||
self.addQuadFlip(index[i], index[i+1], index[i+2], index[i+3], ccw)
|
||||
|
||||
# 2D polygon geometries
|
||||
# Won't work for now, since Cura expects every mesh to have a nontrivial convex hull
|
||||
# The only way around that is merging meshes.
|
||||
|
||||
def processGeometryDisk2D(self, node):
|
||||
innerRadius = readFloat(node, "innerRadius", 0)
|
||||
outerRadius = readFloat(node, "outerRadius", 1)
|
||||
n = readInt(node, "subdivision", DEFAULT_SUBDIV)
|
||||
|
||||
angle = 2 * pi / n
|
||||
|
||||
self.reserveFaceAndVertexCount(n*4 if innerRadius else n-2, n*2 if innerRadius else n)
|
||||
|
||||
for i in range(n):
|
||||
s = sin(angle * i)
|
||||
c = cos(angle * i)
|
||||
self.addVertex(outerRadius*c, outerRadius*s, 0)
|
||||
if innerRadius:
|
||||
self.addVertex(innerRadius*c, innerRadius*s, 0)
|
||||
ni = (i+1) % n
|
||||
self.addQuad(2*i, 2*ni, 2*ni+1, 2*i+1)
|
||||
|
||||
if not innerRadius:
|
||||
for i in range(2, n):
|
||||
self.addTri(0, i-1, i)
|
||||
|
||||
def processGeometryRectangle2D(self, node):
|
||||
(x, y) = readFloatArray(node, "size", (2, 2))
|
||||
self.reserveFaceAndVertexCount(2, 4)
|
||||
self.addVertex(-x/2, -y/2, 0)
|
||||
self.addVertex(x/2, -y/2, 0)
|
||||
self.addVertex(x/2, y/2, 0)
|
||||
self.addVertex(-x/2, y/2, 0)
|
||||
self.addQuad(0, 1, 2, 3)
|
||||
|
||||
def processGeometryTriangleSet2D(self, node):
|
||||
verts = readFloatArray(node, "vertices", ())
|
||||
num_faces = len(verts) // 6
|
||||
verts = [(verts[i], verts[i+1], 0) for i in range(0, 6 * num_faces, 2)]
|
||||
self.reserveFaceAndVertexCount(num_faces, num_faces * 3)
|
||||
for vert in verts:
|
||||
self.addVertex(*vert)
|
||||
|
||||
# The front face is on the +Z side, so CCW is a variable
|
||||
for i in range(0, num_faces*3, 3):
|
||||
a = Vector(*verts[i+2]) - Vector(*verts[i])
|
||||
b = Vector(*verts[i+1]) - Vector(*verts[i])
|
||||
self.addTriFlip(i, i+1, i+2, a.x*b.y > a.y*b.x)
|
||||
|
||||
# General purpose polygon mesh
|
||||
|
||||
def processGeometryIndexedFaceSet(self, node):
|
||||
faces = readIndex(node, "coordIndex")
|
||||
ccw = self.startCoordMesh(node, sum([len(face) - 2 for face in faces]))
|
||||
|
||||
for face in faces:
|
||||
if len(face) == 3:
|
||||
self.addTriFlip(face[0], face[1], face[2], ccw)
|
||||
elif len(face) > 3:
|
||||
self.addFace(face, ccw)
|
||||
|
||||
geometry_importers = {
|
||||
"IndexedFaceSet": processGeometryIndexedFaceSet,
|
||||
"IndexedTriangleSet": processGeometryIndexedTriangleSet,
|
||||
"IndexedTriangleStripSet": processGeometryIndexedTriangleStripSet,
|
||||
"IndexedTriangleFanSet": processGeometryIndexedTriangleFanSet,
|
||||
"TriangleSet": processGeometryTriangleSet,
|
||||
"TriangleStripSet": processGeometryTriangleStripSet,
|
||||
"TriangleFanSet": processGeometryTriangleFanSet,
|
||||
"QuadSet": processGeometryQuadSet,
|
||||
"IndexedQuadSet": processGeometryIndexedQuadSet,
|
||||
"TriangleSet2D": processGeometryTriangleSet2D,
|
||||
"Rectangle2D": processGeometryRectangle2D,
|
||||
"Disk2D": processGeometryDisk2D,
|
||||
"ElevationGrid": processGeometryElevationGrid,
|
||||
"Extrusion": processGeometryExtrusion,
|
||||
"Sphere": processGeometrySphere,
|
||||
"Box": processGeometryBox,
|
||||
"Cylinder": processGeometryCylinder,
|
||||
"Cone": processGeometryCone
|
||||
}
|
||||
|
||||
# Parses the Coordinate.@point field, fills the verts array.
|
||||
def readVertices(self, node):
|
||||
for c in node:
|
||||
if c.tag == "Coordinate":
|
||||
c = self.resolveDefUse(c)
|
||||
if not c is None:
|
||||
pt = c.attrib.get("point")
|
||||
if pt:
|
||||
co = [float(x) for x in pt.split()]
|
||||
num_verts = len(co) // 3
|
||||
self.verts = numpy.empty((4, num_verts), dtype=numpy.float32)
|
||||
self.verts[3,:] = numpy.ones((num_verts), dtype=numpy.float32)
|
||||
# Group by three
|
||||
for i in range(num_verts):
|
||||
self.verts[:3,i] = co[3*i:3*i+3]
|
||||
|
||||
# Mesh builder helpers
|
||||
|
||||
def reserveFaceAndVertexCount(self, num_faces, num_verts):
|
||||
# Unlike the Cura MeshBuilder, we use 4-vectors stored as columns for easier transform
|
||||
self.verts = numpy.zeros((4, num_verts), dtype=numpy.float32)
|
||||
self.verts[3,:] = numpy.ones((num_verts), dtype=numpy.float32)
|
||||
self.num_verts = 0
|
||||
self.reserveFaceCount(num_faces)
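Why the vertices are kept as columns of homogeneous 4-vectors: one matrix product transforms them all at once, and a slice plus transpose recovers MeshBuilder-style rows, mirroring what processShape() does above. A plain numpy sketch:

import numpy as np

verts = np.zeros((4, 3), dtype = np.float32)   # three vertices stored as columns
verts[3, :] = 1.0                              # homogeneous coordinate
verts[:3, 0] = (1, 0, 0)
verts[:3, 1] = (0, 1, 0)
verts[:3, 2] = (0, 0, 1)

m = np.identity(4, dtype = np.float32)
m[:3, 3] = (10, 0, 0)                          # translate everything by +10 on x

rows = m.dot(verts)[:3].transpose()            # back to an n by 3 array of rows
print(rows)                                    # each row is one translated vertex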
|
||||
|
||||
def reserveFaceCount(self, num_faces):
|
||||
self.faces = numpy.zeros((num_faces, 3), dtype=numpy.int32)
|
||||
self.num_faces = 0
|
||||
|
||||
def getVertexCount(self):
|
||||
return self.verts.shape[1]
|
||||
|
||||
def addVertex(self, x, y, z):
|
||||
self.verts[0, self.num_verts] = x
|
||||
self.verts[1, self.num_verts] = y
|
||||
self.verts[2, self.num_verts] = z
|
||||
self.num_verts += 1
|
||||
|
||||
# Indices are 0-based for this shape, but they won't be zero-based in the merged mesh
|
||||
def addTri(self, a, b, c):
|
||||
self.faces[self.num_faces, 0] = self.index_base + a
|
||||
self.faces[self.num_faces, 1] = self.index_base + b
|
||||
self.faces[self.num_faces, 2] = self.index_base + c
|
||||
self.num_faces += 1
|
||||
|
||||
def addTriFlip(self, a, b, c, ccw):
|
||||
if ccw:
|
||||
self.addTri(a, b, c)
|
||||
else:
|
||||
self.addTri(b, a, c)
|
||||
|
||||
# Needs to be convex, but not necessarily planar
|
||||
# Assumed ccw, cut along the ac diagonal
|
||||
def addQuad(self, a, b, c, d):
|
||||
self.addTri(a, b, c)
|
||||
self.addTri(c, d, a)
|
||||
|
||||
def addQuadFlip(self, a, b, c, d, ccw):
|
||||
if ccw:
|
||||
self.addTri(a, b, c)
|
||||
self.addTri(c, d, a)
|
||||
else:
|
||||
self.addTri(a, c, b)
|
||||
self.addTri(c, a, d)
|
||||
|
||||
|
||||
# Arbitrary polygon triangulation.
|
||||
# Doesn't assume convexity and doesn't check the "convex" flag in the file.
|
||||
# Works by the "cutting of ears" algorithm:
|
||||
# - Find an outer vertex with the smallest angle and no vertices inside its adjacent triangle
|
||||
# - Remove the triangle at that vertex
|
||||
# - Repeat until done
|
||||
# Vertex coordinates are supposed to be already set
|
||||
def addFace(self, indices, ccw):
|
||||
# Resolve indices to coordinates for faster math
|
||||
face = [Vector(data=self.verts[0:3, i]) for i in indices]
|
||||
|
||||
# Need a normal to the plane so that we can know which vertices form inner angles
|
||||
normal = findOuterNormal(face)
|
||||
|
||||
if not normal: # Couldn't find an outer edge, non-planar polygon maybe?
|
||||
return
|
||||
|
||||
# Find the vertex with the smallest inner angle and no points inside, cut off. Repeat until done
|
||||
n = len(face)
|
||||
vi = [i for i in range(n)] # We'll be using this to kick vertices from the face
|
||||
while n > 3:
|
||||
max_cos = EPSILON # We don't want to check anything on Pi angles
|
||||
i_min = 0 # max cos corresponds to min angle
|
||||
for i in range(n):
|
||||
inext = (i + 1) % n
|
||||
iprev = (i + n - 1) % n
|
||||
v = face[vi[i]]
|
||||
next = face[vi[inext]] - v
|
||||
prev = face[vi[iprev]] - v
|
||||
nextXprev = next.cross(prev)
|
||||
if nextXprev.dot(normal) > EPSILON: # If it's an inner angle
|
||||
cos = next.dot(prev) / (next.length() * prev.length())
|
||||
if cos > max_cos:
|
||||
# Check if there are vertices inside the triangle
|
||||
no_points_inside = True
|
||||
for j in range(n):
|
||||
if j != i and j != iprev and j != inext:
|
||||
vx = face[vi[j]] - v
|
||||
if pointInsideTriangle(vx, next, prev, nextXprev):
|
||||
no_points_inside = False
|
||||
break
|
||||
|
||||
if no_points_inside:
|
||||
max_cos = cos
|
||||
i_min = i
|
||||
|
||||
self.addTriFlip(indices[vi[(i_min + n - 1) % n]], indices[vi[i_min]], indices[vi[(i_min + 1) % n]], ccw)
|
||||
vi.pop(i_min)
|
||||
n -= 1
|
||||
self.addTriFlip(indices[vi[0]], indices[vi[1]], indices[vi[2]], ccw)
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# X3D field parsers
|
||||
# ------------------------------------------------------------
|
||||
def readFloatArray(node, attr, default):
|
||||
s = node.attrib.get(attr)
|
||||
if not s:
|
||||
return default
|
||||
return [float(x) for x in s.split()]
|
||||
|
||||
def readIntArray(node, attr, default):
|
||||
s = node.attrib.get(attr)
|
||||
if not s:
|
||||
return default
|
||||
return [int(x, 0) for x in s.split()]
|
||||
|
||||
def readFloat(node, attr, default):
|
||||
s = node.attrib.get(attr)
|
||||
if not s:
|
||||
return default
|
||||
return float(s)
|
||||
|
||||
def readInt(node, attr, default):
|
||||
s = node.attrib.get(attr)
|
||||
if not s:
|
||||
return default
|
||||
return int(s, 0)
|
||||
|
||||
def readBoolean(node, attr, default):
|
||||
s = node.attrib.get(attr)
|
||||
if not s:
|
||||
return default
|
||||
return s.lower() == "true"
|
||||
|
||||
def readVector(node, attr, default):
|
||||
v = readFloatArray(node, attr, default)
|
||||
return Vector(v[0], v[1], v[2])
|
||||
|
||||
def readRotation(node, attr, default):
|
||||
v = readFloatArray(node, attr, default)
|
||||
return (v[3], Vector(v[0], v[1], v[2]))
|
||||
|
||||
# Returns the -1-separated runs
|
||||
def readIndex(node, attr):
|
||||
v = readIntArray(node, attr, [])
|
||||
chunks = []
|
||||
chunk = []
|
||||
for i in range(len(v)):
|
||||
if v[i] == -1:
|
||||
if chunk:
|
||||
chunks.append(chunk)
|
||||
chunk = []
|
||||
else:
|
||||
chunk.append(v[i])
|
||||
if chunk:
|
||||
chunks.append(chunk)
|
||||
return chunks
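A stand-alone sketch of the -1-separated run splitting; the helper name split_runs is illustrative.

def split_runs(values):
    runs, run = [], []
    for v in values:
        if v == -1:
            if run:
                runs.append(run)
                run = []
        else:
            run.append(v)
    if run:
        runs.append(run)
    return runs

print(split_runs([0, 1, 2, -1, 2, 3, 0, -1]))   # [[0, 1, 2], [2, 3, 0]]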
|
||||
|
||||
# Given a face as a sequence of vectors, returns a normal to the polygon plane that forms a right triple
|
||||
# with a vector along the polygon sequence and a vector backwards
|
||||
def findOuterNormal(face):
|
||||
n = len(face)
|
||||
for i in range(n):
|
||||
for j in range(i+1, n):
|
||||
edge = face[j] - face[i]
|
||||
if edge.length() > EPSILON:
|
||||
edge = edge.normalized()
|
||||
prev_rejection = Vector()
|
||||
is_outer = True
|
||||
for k in range(n):
|
||||
if k != i and k != j:
|
||||
pt = face[k] - face[i]
|
||||
pte = pt.dot(edge)
|
||||
rejection = pt - edge*pte
|
||||
if rejection.dot(prev_rejection) < -EPSILON: # points on both sides of the edge - not an outer one
|
||||
is_outer = False
|
||||
break
|
||||
elif rejection.length() > prev_rejection.length(): # Pick a greater rejection for numeric stability
|
||||
prev_rejection = rejection
|
||||
|
||||
if is_outer: # Found an outer edge, prev_rejection is the rejection inside the face. Generate a normal.
|
||||
return edge.cross(prev_rejection)
|
||||
|
||||
return False
|
||||
|
||||
# Given two *collinear* vectors a and b, returns the coefficient that takes b to a.
|
||||
# No error handling.
|
||||
# For stability, taking the ratio between the biggest coordinates would be better...
|
||||
def ratio(a, b):
|
||||
if b.x > EPSILON or b.x < -EPSILON:
|
||||
return a.x / b.x
|
||||
elif b.y > EPSILON or b.y < -EPSILON:
|
||||
return a.y / b.y
|
||||
else:
|
||||
return a.z / b.z
|
||||
|
||||
def pointInsideTriangle(vx, next, prev, nextXprev):
|
||||
vxXprev = vx.cross(prev)
|
||||
r = ratio(vxXprev, nextXprev)
|
||||
if r < 0:
|
||||
return False
|
||||
vxXnext = vx.cross(next)
|
||||
s = -ratio(vxXnext, nextXprev)
|
||||
return s > 0 and (s + r) < 1
|
||||
|
26
plugins/X3DReader/__init__.py
Normal file
|
@ -0,0 +1,26 @@
|
|||
# Seva Alekseyev with National Institutes of Health, 2016
|
||||
|
||||
from . import X3DReader
|
||||
|
||||
from UM.i18n import i18nCatalog
|
||||
catalog = i18nCatalog("cura")
|
||||
|
||||
def getMetaData():
|
||||
return {
|
||||
"plugin": {
|
||||
"name": catalog.i18nc("@label", "X3D Reader"),
|
||||
"author": "Seva Alekseyev",
|
||||
"version": "0.5",
|
||||
"description": catalog.i18nc("@info:whatsthis", "Provides support for reading X3D files."),
|
||||
"api": 3
|
||||
},
|
||||
"mesh_reader": [
|
||||
{
|
||||
"extension": "x3d",
|
||||
"description": catalog.i18nc("@item:inlistbox", "X3D File")
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
def register(app):
|
||||
return { "mesh_reader": X3DReader.X3DReader() }
|
|
@ -16,10 +16,6 @@ fragment =
|
|||
uniform sampler2D u_layer0;
|
||||
uniform sampler2D u_layer1;
|
||||
uniform sampler2D u_layer2;
|
||||
uniform sampler2D u_layer3;
|
||||
|
||||
uniform float u_imageWidth;
|
||||
uniform float u_imageHeight;
|
||||
|
||||
uniform vec2 u_offset[9];
|
||||
|
||||
|
@ -27,6 +23,10 @@ fragment =
|
|||
uniform vec4 u_outline_color;
|
||||
uniform vec4 u_error_color;
|
||||
|
||||
const vec3 x_axis = vec3(1.0, 0.0, 0.0);
|
||||
const vec3 y_axis = vec3(0.0, 1.0, 0.0);
|
||||
const vec3 z_axis = vec3(0.0, 0.0, 1.0);
|
||||
|
||||
varying vec2 v_uvs;
|
||||
|
||||
float kernel[9];
|
||||
|
@ -55,14 +55,21 @@ fragment =
|
|||
sum += color * (kernel[i] / u_outline_strength);
|
||||
}
|
||||
|
||||
gl_FragColor = mix(result, vec4(abs(sum.a)) * u_outline_color, abs(sum.a));
|
||||
vec4 layer1 = texture2D(u_layer1, v_uvs);
|
||||
if((layer1.rgb == x_axis || layer1.rgb == y_axis || layer1.rgb == z_axis))
|
||||
{
|
||||
gl_FragColor = result;
|
||||
}
|
||||
else
|
||||
{
|
||||
gl_FragColor = mix(result, vec4(abs(sum.a)) * u_outline_color, abs(sum.a));
|
||||
}
|
||||
}
|
||||
|
||||
[defaults]
|
||||
u_layer0 = 0
|
||||
u_layer1 = 1
|
||||
u_layer2 = 2
|
||||
u_layer3 = 3
|
||||
u_outline_strength = 1.0
|
||||
u_outline_color = [0.05, 0.66, 0.89, 1.0]
|
||||
u_error_color = [1.0, 0.0, 0.0, 1.0]
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
# Copyright (c) 2016 Ultimaker B.V.
|
||||
# Cura is released under the terms of the AGPLv3 or higher.
|
||||
|
||||
import math
|
||||
import copy
|
||||
import io
|
||||
import xml.etree.ElementTree as ET
|
||||
import uuid
|
||||
|
||||
from UM.Resources import Resources
|
||||
from UM.Logger import Logger
|
||||
from UM.Util import parseBool
|
||||
from cura.CuraApplication import CuraApplication
|
||||
|
||||
import UM.Dictionary
|
||||
from UM.Settings.InstanceContainer import InstanceContainer
|
||||
|
@ -18,55 +18,34 @@ from UM.Settings.ContainerRegistry import ContainerRegistry
|
|||
class XmlMaterialProfile(InstanceContainer):
|
||||
def __init__(self, container_id, *args, **kwargs):
|
||||
super().__init__(container_id, *args, **kwargs)
|
||||
self._inherited_files = []
|
||||
|
||||
## Overridden from InstanceContainer
|
||||
def duplicate(self, new_id, new_name = None):
|
||||
base_file = self.getMetaDataEntry("base_file", None)
|
||||
|
||||
if base_file != self.id:
|
||||
containers = ContainerRegistry.getInstance().findInstanceContainers(id = base_file)
|
||||
if containers:
|
||||
new_basefile = containers[0].duplicate(self.getMetaDataEntry("brand") + "_" + new_id, new_name)
|
||||
base_file = new_basefile.id
|
||||
UM.Settings.ContainerRegistry.getInstance().addContainer(new_basefile)
|
||||
|
||||
new_id = self.getMetaDataEntry("brand") + "_" + new_id + "_" + self.getDefinition().getId()
|
||||
variant = self.getMetaDataEntry("variant")
|
||||
if variant:
|
||||
variant_containers = ContainerRegistry.getInstance().findInstanceContainers(id = variant)
|
||||
if variant_containers:
|
||||
new_id += "_" + variant_containers[0].getName().replace(" ", "_")
|
||||
has_base_file = True
|
||||
else:
|
||||
has_base_file = False
|
||||
|
||||
new_id = ContainerRegistry.getInstance().createUniqueName("material", self._id, new_id, "")
|
||||
result = super().duplicate(new_id, new_name)
|
||||
if has_base_file:
|
||||
result.setMetaDataEntry("base_file", base_file)
|
||||
else:
|
||||
result.setMetaDataEntry("base_file", result.id)
|
||||
return result

    def getInheritedFiles(self):
        return self._inherited_files

    ## Overridden from InstanceContainer
    def setReadOnly(self, read_only):
        super().setReadOnly(read_only)

        basefile = self.getMetaDataEntry("base_file", self._id)  #if basefile is none, this is a basefile.
        for container in ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
            container._read_only = read_only
        basefile = self.getMetaDataEntry("base_file", self._id)  # if basefile is self.id, this is a basefile.
        for container in UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
            container._read_only = read_only  # prevent loop instead of calling setReadOnly

    ## Overridden from InstanceContainer
    # set the meta data for all machine / variant combinations
    def setMetaDataEntry(self, key, value):
        if self.isReadOnly():
            return
        if self.getMetaDataEntry(key, None) == value:
            # Prevent loop caused by for loop.
            return

        super().setMetaDataEntry(key, value)

        basefile = self.getMetaDataEntry("base_file", self._id)  #if basefile is none, this is a basefile.
        basefile = self.getMetaDataEntry("base_file", self._id)  #if basefile is self.id, this is a basefile.
        # Update all containers that share GUID and basefile
        for container in ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
            container.setMetaData(copy.deepcopy(self._metadata))
        for container in UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
            container.setMetaDataEntry(key, value)
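A hedged illustration of the propagation above: changing one metadata entry on any profile that shares a base file updates every sibling container registered with that base file. The container id and status value below are illustrative assumptions.

# Illustrative only: metadata changes fan out to all containers that share
# the same "base_file" metadata entry.
from UM.Settings.ContainerRegistry import ContainerRegistry

registry = ContainerRegistry.getInstance()
some_profile = registry.findInstanceContainers(id = "my_pla_ultimaker2_0.4_mm")[0]
some_profile.setMetaDataEntry("status", "verified")
# Every container found via findInstanceContainers(base_file = <its base file>)
# now reports getMetaDataEntry("status") == "verified".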

    ## Overridden from InstanceContainer, similar to setMetaDataEntry.
    # without this function the setName would only set the name of the specific nozzle / material / machine combination container

@ -81,7 +60,7 @@ class XmlMaterialProfile(InstanceContainer):
        super().setName(new_name)

        basefile = self.getMetaDataEntry("base_file", self._id)  # if basefile is none, this is a basefile.
        basefile = self.getMetaDataEntry("base_file", self._id)  # if basefile is self.id, this is a basefile.
        # Update the basefile as well, this is actually what we're trying to do
        # Update all containers that share GUID and basefile
        containers = ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile)

@ -89,17 +68,20 @@ class XmlMaterialProfile(InstanceContainer):
            container.setName(new_name)

    ## Overridden from InstanceContainer
    def setProperty(self, key, property_name, property_value, container = None):
        if self.isReadOnly():
            return

        super().setProperty(key, property_name, property_value)

        basefile = self.getMetaDataEntry("base_file", self._id)  #if basefile is none, this is a basefile.
        for container in ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
            container._dirty = True
    # def setProperty(self, key, property_name, property_value, container = None):
    #     if self.isReadOnly():
    #         return
    #
    #     super().setProperty(key, property_name, property_value)
    #
    #     basefile = self.getMetaDataEntry("base_file", self._id)  #if basefile is self.id, this is a basefile.
    #     for container in UM.Settings.ContainerRegistry.getInstance().findInstanceContainers(base_file = basefile):
    #         if not container.isReadOnly():
    #             container.setDirty(True)

    ## Overridden from InstanceContainer
    # base file: global settings + supported machines
    # machine / variant combination: only changes for itself.
    def serialize(self):
        registry = ContainerRegistry.getInstance()

@ -108,7 +90,7 @@ class XmlMaterialProfile(InstanceContainer):
            # Since we create an instance of XmlMaterialProfile for each machine and nozzle in the profile,
            # we should only serialize the "base" material definition, since that can then take care of
            # serializing the machine/nozzle specific profiles.
            raise NotImplementedError("Cannot serialize non-root XML materials")
            raise NotImplementedError("Ignoring serializing non-root XML materials, the data is contained in the base material")

        builder = ET.TreeBuilder()

@ -150,6 +132,10 @@ class XmlMaterialProfile(InstanceContainer):
        for key, value in metadata.items():
            builder.start(key)
            # Normally value is a string.
            # Nones get handled well.
            if isinstance(value, bool):
                value = str(value)  # parseBool in deserialize expects 'True'.
            builder.data(value)
            builder.end(key)

@ -177,7 +163,7 @@ class XmlMaterialProfile(InstanceContainer):
        machine_container_map = {}
        machine_nozzle_map = {}

        all_containers = registry.findInstanceContainers(GUID = self.getMetaDataEntry("GUID"))
        all_containers = registry.findInstanceContainers(GUID = self.getMetaDataEntry("GUID"), base_file = self._id)
        for container in all_containers:
            definition_id = container.getDefinition().id
            if definition_id == "fdmprinter":

@ -201,7 +187,8 @@ class XmlMaterialProfile(InstanceContainer):
            try:
                product = UM.Dictionary.findKey(self.__product_id_map, definition_id)
            except ValueError:
                continue
                # An unknown product id; export it anyway
                product = definition_id

            builder.start("machine")
            builder.start("machine_identifier", { "manufacturer": definition.getMetaDataEntry("manufacturer", ""), "product": product})

@ -220,7 +207,17 @@ class XmlMaterialProfile(InstanceContainer):
                if not variant_containers:
                    continue

                builder.start("hotend", { "id": variant_containers[0].getName() })
                builder.start("hotend", {"id": variant_containers[0].getName()})

                # Compatible is a special case, as it's added as a meta data entry (instead of an instance).
                compatible = hotend.getMetaDataEntry("compatible")
                if compatible is not None:
                    builder.start("setting", {"key": "hardware compatible"})
                    if compatible:
                        builder.data("yes")
                    else:
                        builder.data("no")
                    builder.end("setting")
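For reference, a minimal standalone sketch of what the builder calls above emit for a compatible hotend; this mirrors the calls in the snippet and is not taken from the profile files themselves.

# Minimal sketch: the "hardware compatible" setting element produced above.
import xml.etree.ElementTree as ET

builder = ET.TreeBuilder()
builder.start("setting", {"key": "hardware compatible"})
builder.data("yes")
builder.end("setting")
element = builder.close()
print(ET.tostring(element, encoding="unicode"))
# -> <setting key="hardware compatible">yes</setting>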

                for instance in hotend.findInstances():
                    if container.getInstance(instance.definition.key) and container.getProperty(instance.definition.key, "value") == instance.value:

@ -242,10 +239,115 @@ class XmlMaterialProfile(InstanceContainer):
        _indent(root)
        stream = io.StringIO()
        tree = ET.ElementTree(root)
        tree.write(stream, "unicode", True)
        tree.write(stream, encoding="unicode", xml_declaration=True)

        return stream.getvalue()

    # Recursively resolve loading inherited files
    def _resolveInheritance(self, file_name):
        xml = self._loadFile(file_name)

        inherits = xml.find("./um:inherits", self.__namespaces)
        if inherits is not None:
            inherited = self._resolveInheritance(inherits.text)
            xml = self._mergeXML(inherited, xml)

        return xml

    def _loadFile(self, file_name):
        path = Resources.getPath(CuraApplication.getInstance().ResourceTypes.MaterialInstanceContainer, file_name + ".xml.fdm_material")

        with open(path, encoding="utf-8") as f:
            contents = f.read()

        self._inherited_files.append(path)
        return ET.fromstring(contents)
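For context, a hedged sketch of the input _resolveInheritance works on: a derived profile names its parent in an um:inherits element, the parent file is loaded from the material resources and the derived data is merged over the inherited tree. The file name and namespace URI below are assumptions, not taken from the profile schema.

# Illustrative only: a derived profile that inherits from a base profile.
import xml.etree.ElementTree as ET

derived_profile = ET.fromstring(
    '<fdmmaterial xmlns:um="http://example.com/material">'
    '  <um:inherits>generic_pla</um:inherits>'
    '</fdmmaterial>'
)
# _resolveInheritance("my_brand_pla") would load my_brand_pla.xml.fdm_material,
# find the inherits element, resolve "generic_pla" recursively and then merge
# the derived data over the inherited tree via _mergeXML().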

    # The XML material profile can have specific settings for machines.
    # Some machines share profiles, so they are only created once.
    # This function duplicates those elements so that each machine tag only has one identifier.
    def _expandMachinesXML(self, element):
        settings_element = element.find("./um:settings", self.__namespaces)
        machines = settings_element.iterfind("./um:machine", self.__namespaces)
        machines_to_add = []
        machines_to_remove = []
        for machine in machines:
            identifiers = list(machine.iterfind("./um:machine_identifier", self.__namespaces))
            has_multiple_identifiers = len(identifiers) > 1
            if has_multiple_identifiers:
                # Multiple identifiers found. We need to create a new machine element and copy all it's settings there.
                for identifier in identifiers:
                    new_machine = copy.deepcopy(machine)
                    # Create list of identifiers that need to be removed from the copied element.
                    other_identifiers = [self._createKey(other_identifier) for other_identifier in identifiers if other_identifier is not identifier]
                    # As we can only remove by exact object reference, we need to look through the identifiers of copied machine.
                    new_machine_identifiers = list(new_machine.iterfind("./um:machine_identifier", self.__namespaces))
                    for new_machine_identifier in new_machine_identifiers:
                        key = self._createKey(new_machine_identifier)
                        # Key was in identifiers to remove, so this element needs to be purged
                        if key in other_identifiers:
                            new_machine.remove(new_machine_identifier)
                    machines_to_add.append(new_machine)
                machines_to_remove.append(machine)
            else:
                pass  # Machine only has one identifier. Nothing to do.
        # Remove & add all required machines.
        for machine_to_remove in machines_to_remove:
            settings_element.remove(machine_to_remove)
        for machine_to_add in machines_to_add:
            settings_element.append(machine_to_add)
        return element
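A hedged before/after sketch of the expansion above: one machine element carrying two machine_identifier children becomes two machine elements with one identifier each, both keeping a copy of the machine's settings, so the later merge can match them individually. The XML below is illustrative and uses a placeholder namespace and products.

# Illustrative only; the namespace URI, products and setting are placeholders.
before = """
<settings xmlns:um="http://example.com/um">
  <um:machine>
    <um:machine_identifier product="Machine A"/>
    <um:machine_identifier product="Machine B"/>
    <um:setting key="print temperature">210</um:setting>
  </um:machine>
</settings>
"""
# After _expandMachinesXML, the equivalent structure is:
after = """
<settings xmlns:um="http://example.com/um">
  <um:machine>
    <um:machine_identifier product="Machine A"/>
    <um:setting key="print temperature">210</um:setting>
  </um:machine>
  <um:machine>
    <um:machine_identifier product="Machine B"/>
    <um:setting key="print temperature">210</um:setting>
  </um:machine>
</settings>
"""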

    def _mergeXML(self, first, second):
        result = copy.deepcopy(first)
        self._combineElement(self._expandMachinesXML(result), self._expandMachinesXML(second))
        return result

    def _createKey(self, element):
        key = element.tag.split("}")[-1]
        if "key" in element.attrib:
            key += " key:" + element.attrib["key"]
        if "manufacturer" in element.attrib:
            key += " manufacturer:" + element.attrib["manufacturer"]
        if "product" in element.attrib:
            key += " product:" + element.attrib["product"]
        if key == "machine":
            for item in element:
                if "machine_identifier" in item.tag:
                    key += " " + item.attrib["product"]
        return key
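A small hedged illustration of the keys _createKey builds for typical elements; the element contents here are made up for the example.

# Illustrative only: example keys produced by the logic above.
import xml.etree.ElementTree as ET

setting = ET.fromstring('<setting key="print temperature">210</setting>')
# tag plus the "key" attribute            -> "setting key:print temperature"

machine = ET.fromstring(
    '<machine><machine_identifier manufacturer="Ultimaker" product="Ultimaker 2"/></machine>'
)
# tag "machine" plus the product of each machine_identifier child
#                                         -> "machine Ultimaker 2"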

    # Recursively merges XML elements. Updates either the text or children if another element is found in first.
    # If it does not exist, copies it from second.
    def _combineElement(self, first, second):
        # Create a mapping from tag name to element.

        mapping = {}
        for element in first:
            key = self._createKey(element)
            mapping[key] = element
        for element in second:
            key = self._createKey(element)
            if len(element):  # Check if element has children.
                try:
                    if "setting" in element.tag and not "settings" in element.tag:
                        # Setting can have points in it. In that case, delete all values and override them.
                        for child in list(mapping[key]):
                            mapping[key].remove(child)
                        for child in element:
                            mapping[key].append(child)
                    else:
                        self._combineElement(mapping[key], element)  # Multiple elements, handle those.
                except KeyError:
                    mapping[key] = element
                    first.append(element)
            else:
                try:
                    mapping[key].text = element.text
                except KeyError:  # Not in the mapping, so simply add it
                    mapping[key] = element
                    first.append(element)

    ## Overridden from InstanceContainer
    def deserialize(self, serialized):
        data = ET.fromstring(serialized)

@ -256,6 +358,11 @@ class XmlMaterialProfile(InstanceContainer):
        # TODO: Add material verfication
        self.addMetaDataEntry("status", "unknown")

        inherits = data.find("./um:inherits", self.__namespaces)
        if inherits is not None:
            inherited = self._resolveInheritance(inherits.text)
            data = self._mergeXML(inherited, data)

        metadata = data.iterfind("./um:metadata/*", self.__namespaces)
        for entry in metadata:
            tag_name = _tag_without_namespace(entry)

@ -312,6 +419,8 @@ class XmlMaterialProfile(InstanceContainer):
            else:
                Logger.log("d", "Unsupported material setting %s", key)

        self.addMetaDataEntry("compatible", global_compatibility)

        self._dirty = False

        machines = data.iterfind("./um:settings/um:machine", self.__namespaces)

@ -333,8 +442,8 @@ class XmlMaterialProfile(InstanceContainer):
            for identifier in identifiers:
                machine_id = self.__product_id_map.get(identifier.get("product"), None)
                if machine_id is None:
                    Logger.log("w", "Cannot create material for unknown machine %s", identifier.get("product"))
                    continue
                    # Lets try again with some naive heuristics.
                    machine_id = identifier.get("product").replace(" ", "").lower()

                definitions = ContainerRegistry.getInstance().findDefinitionContainers(id = machine_id)
                if not definitions:

@ -348,6 +457,8 @@ class XmlMaterialProfile(InstanceContainer):
                new_material.setName(self.getName())
                new_material.setMetaData(copy.deepcopy(self.getMetaData()))
                new_material.setDefinition(definition)
                # Don't use setMetadata, as that overrides it for all materials with same base file
                new_material.getMetaData()["compatible"] = machine_compatibility

                for key, value in global_setting_values.items():
                    new_material.setProperty(key, "value", value, definition)

@ -359,6 +470,7 @@ class XmlMaterialProfile(InstanceContainer):

                ContainerRegistry.getInstance().addContainer(new_material)

                hotends = machine.iterfind("./um:hotend", self.__namespaces)
                for hotend in hotends:
                    hotend_id = hotend.get("id")

@ -387,14 +499,13 @@ class XmlMaterialProfile(InstanceContainer):
                    else:
                        Logger.log("d", "Unsupported material setting %s", key)

                    if not hotend_compatibility:
                        continue

                    new_hotend_material = XmlMaterialProfile(self.id + "_" + machine_id + "_" + hotend_id.replace(" ", "_"))
                    new_hotend_material.setName(self.getName())
                    new_hotend_material.setMetaData(copy.deepcopy(self.getMetaData()))
                    new_hotend_material.setDefinition(definition)
                    new_hotend_material.addMetaDataEntry("variant", variant_containers[0].id)
                    # Don't use setMetadata, as that overrides it for all materials with same base file
                    new_hotend_material.getMetaData()["compatible"] = hotend_compatibility

                    for key, value in global_setting_values.items():
                        new_hotend_material.setProperty(key, "value", value, definition)

@ -408,12 +519,6 @@ class XmlMaterialProfile(InstanceContainer):
                    new_hotend_material._dirty = False
                    ContainerRegistry.getInstance().addContainer(new_hotend_material)

        if not global_compatibility:
            # Change the type of this container so it is not shown as an option in menus.
            # This uses InstanceContainer.setMetaDataEntry because otherwise all containers that
            # share this basefile are also updated.
            super().setMetaDataEntry("type", "incompatible_material")

    def _addSettingElement(self, builder, instance):
        try:
            key = UM.Dictionary.findKey(self.__material_property_setting_map, instance.definition.key)