diff --git a/plugins/ImageReader/ConfigUI.qml b/plugins/ImageReader/ConfigUI.qml
index 47ba10778c..0429fae4e1 100644
--- a/plugins/ImageReader/ConfigUI.qml
+++ b/plugins/ImageReader/ConfigUI.qml
@@ -143,6 +143,52 @@ UM.Dialog
             }
         }
 
+        UM.TooltipArea {
+            Layout.fillWidth:true
+            height: childrenRect.height
+            text: catalog.i18nc("@info:tooltip","For lithophanes a simple logarithmic model for translucency is available. For height maps the pixel values correspond to heights linearly.")
+            Row {
+                width: parent.width
+
+                Label {
+                    text: "Color Model"
+                    width: 150 * screenScaleFactor
+                    anchors.verticalCenter: parent.verticalCenter
+                }
+                ComboBox {
+                    id: color_model
+                    objectName: "ColorModel"
+                    model: [ catalog.i18nc("@item:inlistbox","Linear"), catalog.i18nc("@item:inlistbox","Translucency") ]
+                    width: 180 * screenScaleFactor
+                    onCurrentIndexChanged: { manager.onColorModelChanged(currentIndex) }
+                }
+            }
+        }
+
+        UM.TooltipArea {
+            Layout.fillWidth:true
+            height: childrenRect.height
+            text: catalog.i18nc("@info:tooltip","The percentage of light penetrating a print with a thickness of 1 millimeter. Lowering this value increases the contrast in dark regions and decreases the contrast in light regions of the image.")
+            visible: color_model.currentText == catalog.i18nc("@item:inlistbox","Translucency")
+            Row {
+                width: parent.width
+
+                Label {
+                    text: catalog.i18nc("@action:label", "1mm Transmittance (%)")
+                    width: 150 * screenScaleFactor
+                    anchors.verticalCenter: parent.verticalCenter
+                }
+                TextField {
+                    id: transmittance
+                    objectName: "Transmittance"
+                    focus: true
+                    validator: RegExpValidator {regExp: /^[1-9]\d{0,2}([\,|\.]\d*)?$/}
+                    width: 180 * screenScaleFactor
+                    onTextChanged: { manager.onTransmittanceChanged(text) }
+                }
+            }
+        }
+
         UM.TooltipArea {
             Layout.fillWidth:true
             height: childrenRect.height
diff --git a/plugins/ImageReader/ImageReader.py b/plugins/ImageReader/ImageReader.py
index e720ce4854..d6c2827d16 100644
--- a/plugins/ImageReader/ImageReader.py
+++ b/plugins/ImageReader/ImageReader.py
@@ -3,6 +3,8 @@
 
 import numpy
 
+import math
+
 from PyQt5.QtGui import QImage, qRed, qGreen, qBlue
 from PyQt5.QtCore import Qt
 
@@ -46,9 +48,9 @@ class ImageReader(MeshReader):
 
     def _read(self, file_name):
         size = max(self._ui.getWidth(), self._ui.getDepth())
-        return self._generateSceneNode(file_name, size, self._ui.peak_height, self._ui.base_height, self._ui.smoothing, 512, self._ui.lighter_is_higher)
+        return self._generateSceneNode(file_name, size, self._ui.peak_height, self._ui.base_height, self._ui.smoothing, 512, self._ui.lighter_is_higher, self._ui.use_transparency_model, self._ui.transmittance_1mm)
 
-    def _generateSceneNode(self, file_name, xz_size, peak_height, base_height, blur_iterations, max_size, lighter_is_higher):
+    def _generateSceneNode(self, file_name, xz_size, peak_height, base_height, blur_iterations, max_size, lighter_is_higher, use_transparency_model, transmittance_1mm):
         scene_node = SceneNode()
 
         mesh = MeshBuilder()
@@ -99,12 +101,14 @@ class ImageReader(MeshReader):
         for x in range(0, width):
             for y in range(0, height):
                 qrgb = img.pixel(x, y)
-                avg = float(qRed(qrgb) + qGreen(qrgb) + qBlue(qrgb)) / (3 * 255)
-                height_data[y, x] = avg
+                if use_transparency_model:
+                    height_data[y, x] = (0.299 * math.pow(qRed(qrgb) / 255.0, 2.2) + 0.587 * math.pow(qGreen(qrgb) / 255.0, 2.2) + 0.114 * math.pow(qBlue(qrgb) / 255.0, 2.2))
+                else:
+                    height_data[y, x] = (0.212655 * qRed(qrgb) + 0.715158 * qGreen(qrgb) + 0.072187 * qBlue(qrgb)) / 255 # fast computation ignoring gamma and degamma
             Job.yieldThread()
 
-        if not lighter_is_higher:
+        if lighter_is_higher == use_transparency_model:
             height_data = 1 - height_data
 
         for _ in range(0, blur_iterations):
@@ -124,8 +128,15 @@ class ImageReader(MeshReader):
 
             Job.yieldThread()
 
-        height_data *= scale_vector.y
-        height_data += base_height
+        if use_transparency_model:
+            divisor = 1.0 / math.log(transmittance_1mm / 100.0) # log-base doesn't matter here. Precompute this value for faster computation of each pixel.
+            min_luminance = (transmittance_1mm / 100.0) ** (peak_height - base_height)
+            for (y, x) in numpy.ndindex(height_data.shape):
+                mapped_luminance = min_luminance + (1.0 - min_luminance) * height_data[y, x]
+                height_data[y, x] = base_height + divisor * math.log(mapped_luminance) # use same base as a couple lines above this
+        else:
+            height_data *= scale_vector.y
+            height_data += base_height
 
         heightmap_face_count = 2 * height_minus_one * width_minus_one
         total_face_count = heightmap_face_count + (width_minus_one * 2) * (height_minus_one * 2) + 2
diff --git a/plugins/ImageReader/ImageReaderUI.py b/plugins/ImageReader/ImageReaderUI.py
index 213468a2ab..a61fabb742 100644
--- a/plugins/ImageReader/ImageReaderUI.py
+++ b/plugins/ImageReader/ImageReaderUI.py
@@ -34,6 +34,8 @@ class ImageReaderUI(QObject):
         self.peak_height = 2.5
         self.smoothing = 1
         self.lighter_is_higher = False;
+        self.use_transparency_model = True;
+        self.transmittance_1mm = 50.0; # based on pearl PLA
 
         self._ui_lock = threading.Lock()
         self._cancelled = False
@@ -75,6 +77,7 @@ class ImageReaderUI(QObject):
 
         self._ui_view.findChild(QObject, "Base_Height").setProperty("text", str(self.base_height))
         self._ui_view.findChild(QObject, "Peak_Height").setProperty("text", str(self.peak_height))
+        self._ui_view.findChild(QObject, "Transmittance").setProperty("text", str(self.transmittance_1mm))
         self._ui_view.findChild(QObject, "Smoothing").setProperty("value", self.smoothing)
 
     def _createConfigUI(self):
@@ -144,3 +147,11 @@ class ImageReaderUI(QObject):
     @pyqtSlot(int)
     def onImageColorInvertChanged(self, value):
         self.lighter_is_higher = (value == 1)
+
+    @pyqtSlot(int)
+    def onColorModelChanged(self, value):
+        self.use_transparency_model = (value == 0)
+
+    @pyqtSlot(int)
+    def onTransmittanceChanged(self, value):
+        self.transmittance_1mm = value
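A note on the translucency mapping introduced in `_generateSceneNode`: the model assumes light decays exponentially with wall thickness, so a wall of `d` mm passes `(transmittance_1mm / 100) ** d` of the incoming light, and inverting that relation gives `d = log(luminance) / log(transmittance_1mm / 100)`. The sketch below restates the patch's per-pixel mapping as a standalone function; the helper name `luminance_to_thickness` and the 0.4 mm base height used in the example are illustrative assumptions, not values taken from this diff.

```python
import math

def luminance_to_thickness(luminance, base_height, peak_height, transmittance_1mm):
    """Map a gamma-corrected luminance in [0, 1] to a wall thickness in mm,
    mirroring the logarithmic translucency model used in the patch."""
    t = transmittance_1mm / 100.0
    # The darkest pixel should come out at peak_height, i.e. at the light level
    # that survives a wall of (peak_height - base_height) mm.
    min_luminance = t ** (peak_height - base_height)
    # Rescale so white (1.0) maps to base_height and black (0.0) to peak_height.
    mapped = min_luminance + (1.0 - min_luminance) * luminance
    return base_height + math.log(mapped) / math.log(t)

# Example with the patch defaults of 2.5 mm peak height and 50% transmittance
# per millimeter, plus an assumed 0.4 mm base height.
for lum in (0.0, 0.25, 0.5, 0.75, 1.0):
    print("luminance %.2f -> %.2f mm" % (lum, luminance_to_thickness(lum, 0.4, 2.5, 50.0)))
```

With these numbers, pure white maps to 0.4 mm and pure black to 2.5 mm, which is the behaviour the `min_luminance` term in the patch is there to guarantee.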