Mirror of https://github.com/SoftFever/OrcaSlicer.git
Merge branch 'master' of https://github.com/prusa3d/PrusaSlicer into et_adaptive_layer_height
Commit 3baf11f694
29 changed files with 2228 additions and 360 deletions
@@ -1205,7 +1205,7 @@ ExPolygons variable_offset_inner_ex(const ExPolygon &expoly, const std::vector<s
 {
 #ifndef NDEBUG
     // Verify that the deltas are all non positive.
-    for (const std::vector<float>& ds : deltas)
+    for (const std::vector<float>& ds : deltas)
         for (float delta : ds)
             assert(delta <= 0.);
     assert(expoly.holes.size() + 1 == deltas.size());
@@ -60,9 +60,9 @@ std::vector<float> contour_distance(const EdgeGrid::Grid &grid, const size_t idx
     for (size_t axis = 0; axis < 2; ++ axis) {
         double dx = std::abs(dir(axis));
         if (dx >= EPSILON) {
-            double tedge = (dir(axis) > 0) ? (double(bbox.max(axis)) - EPSILON - this->pt(axis)) : (this->pt(axis) - double(bbox.min(axis)) - EPSILON);
+            double tedge = (dir(axis) > 0) ? (double(bbox.max(axis)) - SCALED_EPSILON - this->pt(axis)) : (this->pt(axis) - double(bbox.min(axis)) - SCALED_EPSILON);
             if (tedge < dx)
-                t = tedge / dx;
+                t = std::min(t, tedge / dx);
         }
     }
     this->dir = dir;
@@ -70,6 +70,7 @@ std::vector<float> contour_distance(const EdgeGrid::Grid &grid, const size_t idx
     dir *= t;
     this->pt_end = (this->pt + dir).cast<coord_t>();
     this->t_min = 1.;
+    assert(this->grid.bbox().contains(this->pt_start) && this->grid.bbox().contains(this->pt_end));
 }

 bool operator()(coord_t iy, coord_t ix) {
@@ -361,7 +362,7 @@ static inline void smooth_compensation_banded(const Points &contour, float band,
 }

 ExPolygon elephant_foot_compensation(const ExPolygon &input_expoly, const Flow &external_perimeter_flow, const double compensation)
-{
+{
     // The contour shall be wide enough to apply the external perimeter plus compensation on both sides.
     double min_contour_width = double(external_perimeter_flow.scaled_width() + external_perimeter_flow.scaled_spacing());
     double scaled_compensation = scale_(compensation);
@@ -369,39 +370,59 @@ ExPolygon elephant_foot_compensation(const ExPolygon &input_expoly, const Flow &
     // Make the search radius a bit larger for the averaging in contour_distance over a fan of rays to work.
     double search_radius = min_contour_width_compensated + min_contour_width * 0.5;

-    EdgeGrid::Grid grid;
-    ExPolygon simplified = input_expoly.simplify(SCALED_EPSILON).front();
-    BoundingBox bbox = get_extents(simplified.contour);
-    bbox.offset(SCALED_EPSILON);
-    grid.set_bbox(bbox);
-    grid.create(simplified, coord_t(0.7 * search_radius));
-    std::vector<std::vector<float>> deltas;
-    deltas.reserve(simplified.holes.size() + 1);
-    ExPolygon resampled(simplified);
-    double resample_interval = scale_(0.5);
-    for (size_t idx_contour = 0; idx_contour <= simplified.holes.size(); ++ idx_contour) {
-        Polygon &poly = (idx_contour == 0) ? resampled.contour : resampled.holes[idx_contour - 1];
-        std::vector<ResampledPoint> resampled_point_parameters;
-        poly.points = resample_polygon(poly.points, resample_interval, resampled_point_parameters);
-        std::vector<float> dists = contour_distance(grid, idx_contour, poly.points, resampled_point_parameters, search_radius);
-        for (float &d : dists) {
-            // printf("Point %d, Distance: %lf\n", int(&d - dists.data()), unscale<double>(d));
-            // Convert contour width to available compensation distance.
-            if (d < min_contour_width)
-                d = 0.f;
-            else if (d > min_contour_width_compensated)
-                d = - float(scaled_compensation);
-            else
-                d = - (d - float(min_contour_width)) / 2.f;
-            assert(d >= - float(scaled_compensation) && d <= 0.f);
-        }
-        // smooth_compensation(dists, 0.4f, 10);
-        smooth_compensation_banded(poly.points, float(0.8 * resample_interval), dists, 0.3f, 3);
-        deltas.emplace_back(dists);
-    }
-
-    ExPolygons out = variable_offset_inner_ex(resampled, deltas, 2.);
-    return out.front();
+    BoundingBox bbox = get_extents(input_expoly.contour);
+    Point bbox_size = bbox.size();
+    ExPolygon out;
+    if (bbox_size.x() < min_contour_width_compensated + SCALED_EPSILON ||
+        bbox_size.y() < min_contour_width_compensated + SCALED_EPSILON ||
+        input_expoly.area() < min_contour_width_compensated * min_contour_width_compensated * 5.)
+    {
+        // The contour is tiny. Don't correct it.
+        out = input_expoly;
+    }
+    else
+    {
+        EdgeGrid::Grid grid;
+        ExPolygon simplified = input_expoly.simplify(SCALED_EPSILON).front();
+        BoundingBox bbox = get_extents(simplified.contour);
+        bbox.offset(SCALED_EPSILON);
+        grid.set_bbox(bbox);
+        grid.create(simplified, coord_t(0.7 * search_radius));
+        std::vector<std::vector<float>> deltas;
+        deltas.reserve(simplified.holes.size() + 1);
+        ExPolygon resampled(simplified);
+        double resample_interval = scale_(0.5);
+        for (size_t idx_contour = 0; idx_contour <= simplified.holes.size(); ++ idx_contour) {
+            Polygon &poly = (idx_contour == 0) ? resampled.contour : resampled.holes[idx_contour - 1];
+            std::vector<ResampledPoint> resampled_point_parameters;
+            poly.points = resample_polygon(poly.points, resample_interval, resampled_point_parameters);
+            std::vector<float> dists = contour_distance(grid, idx_contour, poly.points, resampled_point_parameters, search_radius);
+            for (float &d : dists) {
+                // printf("Point %d, Distance: %lf\n", int(&d - dists.data()), unscale<double>(d));
+                // Convert contour width to available compensation distance.
+                if (d < min_contour_width)
+                    d = 0.f;
+                else if (d > min_contour_width_compensated)
+                    d = - float(scaled_compensation);
+                else
+                    d = - (d - float(min_contour_width)) / 2.f;
+                assert(d >= - float(scaled_compensation) && d <= 0.f);
+            }
+            // smooth_compensation(dists, 0.4f, 10);
+            smooth_compensation_banded(poly.points, float(0.8 * resample_interval), dists, 0.3f, 3);
+            deltas.emplace_back(dists);
+        }
+
+        ExPolygons out_vec = variable_offset_inner_ex(resampled, deltas, 2.);
+        assert(out_vec.size() == 1);
+        if (out_vec.size() == 1)
+            out = std::move(out_vec.front());
+        else
+            // Something went wrong, don't compensate.
+            out = input_expoly;
+    }
+    return out;
 }

 ExPolygons elephant_foot_compensation(const ExPolygons &input, const Flow &external_perimeter_flow, const double compensation)
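The width-to-compensation conversion in the hunk above is a simple clamp. A minimal standalone sketch of the same mapping follows; the function name and free parameters are illustrative, not part of the commit, and `min_contour_width_compensated = min_contour_width + 2 * scaled_compensation` is inferred from the branch boundaries (it makes the three cases meet continuously). All inputs are in the slicer's scaled coordinates.

```cpp
#include <cassert>

// Map a measured contour width d to a non-positive inward offset.
static float compensation_delta(float d, float min_contour_width, float scaled_compensation)
{
    float min_contour_width_compensated = min_contour_width + 2.f * scaled_compensation;
    if (d < min_contour_width)
        return 0.f;                      // too thin, do not compensate at all
    if (d > min_contour_width_compensated)
        return -scaled_compensation;     // wide enough, apply full compensation
    // In between, spend half of the excess width on each side of the contour.
    float delta = -(d - min_contour_width) / 2.f;
    assert(delta >= -scaled_compensation && delta <= 0.f);
    return delta;
}
```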
@@ -267,6 +267,15 @@ public:

     //static inline std::string role_to_string(ExtrusionLoopRole role);

+#ifndef NDEBUG
+    bool validate() const {
+        assert(this->first_point() == this->paths.back().polyline.points.back());
+        for (size_t i = 1; i < paths.size(); ++ i)
+            assert(this->paths[i - 1].polyline.points.back() == this->paths[i].polyline.points.front());
+        return true;
+    }
+#endif /* NDEBUG */
+
 private:
     ExtrusionLoopRole m_loop_role;
 };
@@ -534,7 +534,8 @@ struct ContourPointData {
 // Verify whether the contour from point idx_start to point idx_end could be taken (whether all segments along the contour were not yet extruded).
 static bool could_take(const std::vector<ContourPointData> &contour_data, size_t idx_start, size_t idx_end)
 {
-    for (size_t i = idx_start; i < idx_end; ) {
+    assert(idx_start != idx_end);
+    for (size_t i = idx_start; i != idx_end; ) {
         if (contour_data[i].segment_consumed || contour_data[i].point_consumed)
             return false;
         if (++ i == contour_data.size())
@@ -899,63 +900,86 @@ void Fill::connect_infill(Polylines &&infill_ordered, const ExPolygon &boundary_

     // Mark the points and segments of split boundary as consumed if they are very close to some of the infill line.
     {
-        const double clip_distance = scale_(this->spacing);
+        //const double clip_distance = scale_(this->spacing);
+        const double clip_distance = 3. * scale_(this->spacing);
         const double distance_colliding = scale_(this->spacing);
         mark_boundary_segments_touching_infill(boundary, boundary_data, bbox, infill_ordered, clip_distance, distance_colliding);
     }

     // Chain infill_ordered.
     //FIXME run the following loop through a heap sorted by the shortest perimeter edge that could be taken.
-    //length between two lines
+    // Connection from end of one infill line to the start of another infill line.
     //const float length_max = scale_(this->spacing);
-    const float length_max = scale_((2. / params.density) * this->spacing);
-    size_t idx_chain_last = 0;
+    // const float length_max = scale_((2. / params.density) * this->spacing);
+    const float length_max = scale_((1000. / params.density) * this->spacing);
+    std::vector<size_t> merged_with(infill_ordered.size());
+    for (size_t i = 0; i < merged_with.size(); ++ i)
+        merged_with[i] = i;
+    struct ConnectionCost {
+        ConnectionCost(size_t idx_first, double cost, bool reversed) : idx_first(idx_first), cost(cost), reversed(reversed) {}
+        size_t idx_first;
+        double cost;
+        bool reversed;
+    };
+    std::vector<ConnectionCost> connections_sorted;
+    connections_sorted.reserve(infill_ordered.size() * 2 - 2);
     for (size_t idx_chain = 1; idx_chain < infill_ordered.size(); ++ idx_chain) {
-        Polyline &pl1 = infill_ordered[idx_chain_last];
-        Polyline &pl2 = infill_ordered[idx_chain];
+        const Polyline &pl1 = infill_ordered[idx_chain - 1];
+        const Polyline &pl2 = infill_ordered[idx_chain];
         const std::pair<size_t, size_t> *cp1 = &map_infill_end_point_to_boundary[(idx_chain - 1) * 2 + 1];
         const std::pair<size_t, size_t> *cp2 = &map_infill_end_point_to_boundary[idx_chain * 2];
-        const Points &contour = boundary[cp1->first];
-        std::vector<ContourPointData> &contour_data = boundary_data[cp1->first];
-        bool valid = false;
-        bool reversed = false;
+        const std::vector<ContourPointData> &contour_data = boundary_data[cp1->first];
         if (cp1->first == cp2->first) {
             // End points on the same contour. Try to connect them.
-            float param_lo = (cp1->second == 0) ? 0.f : contour_data[cp1->second].param;
-            float param_hi = (cp2->second == 0) ? 0.f : contour_data[cp2->second].param;
+            float param_lo = (cp1->second == 0) ? 0.f : contour_data[cp1->second].param;
+            float param_hi = (cp2->second == 0) ? 0.f : contour_data[cp2->second].param;
             float param_end = contour_data.front().param;
+            bool reversed = false;
             if (param_lo > param_hi) {
                 std::swap(param_lo, param_hi);
                 std::swap(cp1, cp2);
                 reversed = true;
             }
             assert(param_lo >= 0.f && param_lo <= param_end);
             assert(param_hi >= 0.f && param_hi <= param_end);
-            float dist1 = param_hi - param_lo;
-            float dist2 = param_lo + param_end - param_hi;
-            if (dist1 > dist2) {
-                std::swap(dist1, dist2);
-                std::swap(cp1, cp2);
-                reversed = ! reversed;
-            }
-            if (dist1 < length_max) {
-                // Try to connect the shorter path.
-                valid = could_take(contour_data, cp1->second, cp2->second);
-                // Try to connect the longer path.
-                if (! valid && dist2 < length_max) {
-                    std::swap(cp1, cp2);
-                    reversed = ! reversed;
-                    valid = could_take(contour_data, cp1->second, cp2->second);
-                }
-            }
+            double len = param_hi - param_lo;
+            if (len < length_max)
+                connections_sorted.emplace_back(idx_chain - 1, len, reversed);
+            len = param_lo + param_end - param_hi;
+            if (len < length_max)
+                connections_sorted.emplace_back(idx_chain - 1, len, ! reversed);
         }
-        if (valid)
-            take(pl1, std::move(pl2), contour, contour_data, cp1->second, cp2->second, reversed);
-        else if (++ idx_chain_last < idx_chain)
-            infill_ordered[idx_chain_last] = std::move(pl2);
     }
-    infill_ordered.erase(infill_ordered.begin() + idx_chain_last + 1, infill_ordered.end());
-    append(polylines_out, std::move(infill_ordered));
+    std::sort(connections_sorted.begin(), connections_sorted.end(), [](const ConnectionCost& l, const ConnectionCost& r) { return l.cost < r.cost; });
+
+    size_t idx_chain_last = 0;
+    for (ConnectionCost &connection_cost : connections_sorted) {
+        const std::pair<size_t, size_t> *cp1 = &map_infill_end_point_to_boundary[connection_cost.idx_first * 2 + 1];
+        const std::pair<size_t, size_t> *cp2 = &map_infill_end_point_to_boundary[(connection_cost.idx_first + 1) * 2];
+        assert(cp1->first == cp2->first);
+        std::vector<ContourPointData> &contour_data = boundary_data[cp1->first];
+        if (connection_cost.reversed)
+            std::swap(cp1, cp2);
+        if (could_take(contour_data, cp1->second, cp2->second)) {
+            // Indices of the polygons to be connected.
+            size_t idx_first = connection_cost.idx_first;
+            size_t idx_second = idx_first + 1;
+            for (size_t last = idx_first;;) {
+                size_t lower = merged_with[last];
+                if (lower == last) {
+                    merged_with[idx_first] = lower;
+                    idx_first = lower;
+                    break;
+                }
+                last = lower;
+            }
+            // Connect the two polygons using the boundary contour.
+            take(infill_ordered[idx_first], std::move(infill_ordered[idx_second]), boundary[cp1->first], contour_data, cp1->second, cp2->second, connection_cost.reversed);
+            // Mark the second polygon as merged with the first one.
+            merged_with[idx_second] = merged_with[idx_first];
+        }
+    }
+    polylines_out.reserve(polylines_out.size() + std::count_if(infill_ordered.begin(), infill_ordered.end(), [](const Polyline &pl) { return ! pl.empty(); }));
+    for (Polyline &pl : infill_ordered)
+        if (! pl.empty())
+            polylines_out.emplace_back(std::move(pl));
 }

 #endif
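The `merged_with` vector above is a tiny union-find-style chain of representatives: every infill line starts as its own chain, and when a line is appended to an earlier one, it records which line now owns it. A minimal sketch of the same root-chasing loop, factored into a helper with a hypothetical name (the diff inlines this logic instead):

```cpp
#include <cstddef>
#include <vector>

// Follow merged_with links until reaching a self-referencing root, then
// shortcut the starting index directly to that root, exactly as the
// inlined loop in connect_infill() does before calling take().
static size_t chain_root(std::vector<size_t> &merged_with, size_t idx)
{
    size_t last = idx;
    for (;;) {
        size_t lower = merged_with[last];
        if (lower == last) {
            merged_with[idx] = lower; // path shortening for the next lookup
            return lower;
        }
        last = lower;
    }
}
```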
@@ -169,7 +169,7 @@ void FillGyroid::_fill_surface_single(
     bb.merge(_align_to_grid(bb.min, Point(2*M_PI*distance, 2*M_PI*distance)));

     // generate pattern
-    Polylines polylines_square = make_gyroid_waves(
+    Polylines polylines = make_gyroid_waves(
         scale_(this->z),
         density_adjusted,
         this->spacing,
@@ -177,22 +177,25 @@ void FillGyroid::_fill_surface_single(
         ceil(bb.size()(1) / distance) + 1.);

     // shift the polyline to the grid origin
-    for (Polyline &pl : polylines_square)
+    for (Polyline &pl : polylines)
         pl.translate(bb.min);

-    Polylines polylines_chained = chain_polylines(intersection_pl(polylines_square, to_polygons(expolygon)));
+    polylines = intersection_pl(polylines, to_polygons(expolygon));

-    size_t polylines_out_first_idx = polylines_out.size();
-    if (! polylines_chained.empty()) {
-        // connect lines
+    if (! polylines.empty())
+        // remove too small bits (larger than longer)
+        polylines.erase(
+            std::remove_if(polylines.begin(), polylines.end(), [this](const Polyline &pl) { return pl.length() < scale_(this->spacing * 3); }),
+            polylines.end());
+
+    if (! polylines.empty()) {
+        polylines = chain_polylines(polylines);
+        // connect lines
+        size_t polylines_out_first_idx = polylines_out.size();
         if (params.dont_connect)
-            append(polylines_out, std::move(polylines_chained));
+            append(polylines_out, std::move(polylines));
         else
-            this->connect_infill(std::move(polylines_chained), expolygon, polylines_out, params);
-        // remove too small bits (larger than longer)
-        polylines_out.erase(
-            std::remove_if(polylines_out.begin() + polylines_out_first_idx, polylines_out.end(), [this](const Polyline &pl){ return pl.length() < scale_(this->spacing * 3); }),
-            polylines_out.end());
+            this->connect_infill(std::move(polylines), expolygon, polylines_out, params);
         // new paths must be rotated back
         if (abs(infill_angle) >= EPSILON) {
             for (auto it = polylines_out.begin() + polylines_out_first_idx; it != polylines_out.end(); ++ it)
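The reordering above filters too-short gyroid scraps before chaining and connecting, instead of filtering the connected output afterwards, so tiny intersection leftovers never become connection targets. A self-contained sketch of that erase/remove_if filter on a stand-in polyline type (all names here are illustrative, not Slic3r's):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

struct Pt { double x, y; };

struct PolylineStub {
    std::vector<Pt> points;
    double length() const {
        double len = 0.;
        for (size_t i = 1; i < points.size(); ++ i)
            len += std::hypot(points[i].x - points[i - 1].x, points[i].y - points[i - 1].y);
        return len;
    }
};

// Drop every polyline shorter than three times the extrusion spacing,
// mirroring the "remove too small bits" predicate in the hunk above.
static void drop_short_bits(std::vector<PolylineStub> &polylines, double spacing)
{
    polylines.erase(
        std::remove_if(polylines.begin(), polylines.end(),
                       [spacing](const PolylineStub &pl) { return pl.length() < 3. * spacing; }),
        polylines.end());
}
```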
@@ -285,7 +285,7 @@ static inline Point wipe_tower_point_to_object_point(GCode &gcodegen, const Vec2
     return Point(scale_(wipe_tower_pt.x() - gcodegen.origin()(0)), scale_(wipe_tower_pt.y() - gcodegen.origin()(1)));
 }

-std::string WipeTowerIntegration::append_tcr(GCode &gcodegen, const WipeTower::ToolChangeResult &tcr, int new_extruder_id) const
+std::string WipeTowerIntegration::append_tcr(GCode &gcodegen, const WipeTower::ToolChangeResult &tcr, int new_extruder_id, double z) const
 {
     if (new_extruder_id != -1 && new_extruder_id != tcr.new_tool)
         throw std::invalid_argument("Error: WipeTowerIntegration::append_tcr was asked to do a toolchange it didn't expect.");
@@ -321,6 +321,15 @@ std::string WipeTowerIntegration::append_tcr(GCode &gcodegen, const WipeTower::T
         gcode += gcodegen.unretract();
     }

+    double current_z = gcodegen.writer().get_position().z();
+    if (z == -1.) // in case no specific z was provided, print at current_z pos
+        z = current_z;
+    if (! is_approx(z, current_z)) {
+        gcode += gcodegen.writer().retract();
+        gcode += gcodegen.writer().travel_to_z(z, "Travel down to the last wipe tower layer.");
+        gcode += gcodegen.writer().unretract();
+    }
+
     // Process the end filament gcode.
     std::string end_filament_gcode_str;
@@ -387,16 +396,23 @@ std::string WipeTowerIntegration::append_tcr(GCode &gcodegen, const WipeTower::T
     // A phony move to the end position at the wipe tower.
     gcodegen.writer().travel_to_xy(end_pos.cast<double>());
     gcodegen.set_last_pos(wipe_tower_point_to_object_point(gcodegen, end_pos));
+    if (! is_approx(z, current_z)) {
+        gcode += gcodegen.writer().retract();
+        gcode += gcodegen.writer().travel_to_z(current_z, "Travel back up to the topmost object layer.");
+        gcode += gcodegen.writer().unretract();
+    }

-    // Prepare a future wipe.
-    gcodegen.m_wipe.path.points.clear();
-    if (new_extruder_id >= 0) {
-        // Start the wipe at the current position.
-        gcodegen.m_wipe.path.points.emplace_back(wipe_tower_point_to_object_point(gcodegen, end_pos));
-        // Wipe end point: Wipe direction away from the closer tower edge to the further tower edge.
-        gcodegen.m_wipe.path.points.emplace_back(wipe_tower_point_to_object_point(gcodegen,
-            Vec2f((std::abs(m_left - end_pos.x()) < std::abs(m_right - end_pos.x())) ? m_right : m_left,
-            end_pos.y())));
+    else {
+        // Prepare a future wipe.
+        gcodegen.m_wipe.path.points.clear();
+        if (new_extruder_id >= 0) {
+            // Start the wipe at the current position.
+            gcodegen.m_wipe.path.points.emplace_back(wipe_tower_point_to_object_point(gcodegen, end_pos));
+            // Wipe end point: Wipe direction away from the closer tower edge to the further tower edge.
+            gcodegen.m_wipe.path.points.emplace_back(wipe_tower_point_to_object_point(gcodegen,
+                Vec2f((std::abs(m_left - end_pos.x()) < std::abs(m_right - end_pos.x())) ? m_right : m_left,
+                end_pos.y())));
+        }
     }

     // Let the planner know we are traveling between objects.
@@ -522,7 +538,23 @@ std::string WipeTowerIntegration::tool_change(GCode &gcodegen, int extruder_id,
     if (m_layer_idx < (int)m_tool_changes.size()) {
         if (! (size_t(m_tool_change_idx) < m_tool_changes[m_layer_idx].size()))
             throw std::runtime_error("Wipe tower generation failed, possibly due to empty first layer.");
-        gcode += append_tcr(gcodegen, m_tool_changes[m_layer_idx][m_tool_change_idx++], extruder_id);
+
+        // Calculate where the wipe tower layer will be printed. -1 means that print z will not change,
+        // resulting in a wipe tower with sparse layers.
+        double wipe_tower_z = -1;
+        bool ignore_sparse = false;
+        if (gcodegen.config().wipe_tower_no_sparse_layers.value) {
+            wipe_tower_z = m_last_wipe_tower_print_z;
+            ignore_sparse = (m_brim_done && m_tool_changes[m_layer_idx].size() == 1 && m_tool_changes[m_layer_idx].front().initial_tool == m_tool_changes[m_layer_idx].front().new_tool);
+            if (m_tool_change_idx == 0 && ! ignore_sparse)
+                wipe_tower_z = m_last_wipe_tower_print_z + m_tool_changes[m_layer_idx].front().layer_height;
+        }
+
+        if (! ignore_sparse) {
+            gcode += append_tcr(gcodegen, m_tool_changes[m_layer_idx][m_tool_change_idx++], extruder_id, wipe_tower_z);
+            m_last_wipe_tower_print_z = wipe_tower_z;
+        }
     }
     m_brim_done = true;
 }
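The tool_change() hunk decides the wipe tower's print z once per layer. A condensed sketch of that decision as a hypothetical free function, with the class state passed in as parameters (it assumes, as the member name suggests, that `m_last_wipe_tower_print_z` tracks the top of the last printed tower layer): a result of -1. means append_tcr() keeps the current print z, i.e. the tower gets a sparse layer.

```cpp
struct WipeTowerZ {
    double z;             // z to hand to append_tcr(); -1. keeps the current print z
    bool   ignore_sparse; // true: no real toolchange on this layer, skip the tower entirely
};

static WipeTowerZ decide_wipe_tower_z(bool no_sparse_layers, bool brim_done,
                                      int tool_changes_on_layer, bool tool_actually_changes,
                                      int tool_change_idx,
                                      double last_wipe_tower_print_z, double layer_height)
{
    WipeTowerZ d { -1., false };
    if (no_sparse_layers) {
        d.z = last_wipe_tower_print_z;
        // A single "toolchange" from a tool to itself marks a layer without a real change.
        d.ignore_sparse = brim_done && tool_changes_on_layer == 1 && ! tool_actually_changes;
        if (tool_change_idx == 0 && ! d.ignore_sparse)
            d.z = last_wipe_tower_print_z + layer_height; // grow the tower by one layer
    }
    return d;
}
```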
@@ -113,7 +113,7 @@ public:

 private:
     WipeTowerIntegration& operator=(const WipeTowerIntegration&);
-    std::string append_tcr(GCode &gcodegen, const WipeTower::ToolChangeResult &tcr, int new_extruder_id) const;
+    std::string append_tcr(GCode &gcodegen, const WipeTower::ToolChangeResult &tcr, int new_extruder_id, double z = -1.) const;

     // Postprocesses gcode: rotates and moves G1 extrusions and returns result
     std::string post_process_wipe_tower_moves(const WipeTower::ToolChangeResult& tcr, const Vec2f& translation, float angle) const;
@@ -134,6 +134,7 @@ private:
     int m_tool_change_idx;
     bool m_brim_done;
     bool i_have_brim = false;
+    double m_last_wipe_tower_print_z = 0.f;
 };

 class GCode {
@@ -474,6 +474,7 @@ WipeTower::WipeTower(const PrintConfig& config, const std::vector<std::vector<fl
     m_z_pos(0.f),
     m_is_first_layer(false),
     m_bridging(float(config.wipe_tower_bridging)),
+    m_no_sparse_layers(config.wipe_tower_no_sparse_layers),
     m_gcode_flavor(config.gcode_flavor),
     m_current_tool(initial_tool),
     wipe_volumes(wiping_matrix)
@@ -1145,9 +1146,10 @@ WipeTower::ToolChangeResult WipeTower::finish_layer()
     writer.set_initial_position((m_left_to_right ? fill_box.ru : fill_box.lu), // so there is never a diagonal travel
         m_wipe_tower_width, m_wipe_tower_depth, m_internal_rotation);

+    bool toolchanges_on_layer = m_layer_info->toolchanges_depth() > WT_EPSILON;
     box_coordinates box = fill_box;
     for (int i=0;i<2;++i) {
-        if (m_layer_info->toolchanges_depth() < WT_EPSILON) { // there were no toolchanges on this layer
+        if (! toolchanges_on_layer) {
             if (i==0) box.expand(m_perimeter_width);
             else box.expand(-m_perimeter_width);
         }
@@ -1201,9 +1203,12 @@ WipeTower::ToolChangeResult WipeTower::finish_layer()

     m_depth_traversed = m_wipe_tower_depth-m_perimeter_width;

-    // Ask our writer about how much material was consumed:
-    if (m_current_tool < m_used_filament_length.size())
-        m_used_filament_length[m_current_tool] += writer.get_and_reset_used_filament_length();
+    // Ask our writer about how much material was consumed.
+    // Skip this in case the layer is sparse and config option to not print sparse layers is enabled.
+    if (! m_no_sparse_layers || toolchanges_on_layer)
+        if (m_current_tool < m_used_filament_length.size())
+            m_used_filament_length[m_current_tool] += writer.get_and_reset_used_filament_length();

     ToolChangeResult result;
     result.priming = false;
@@ -220,6 +220,7 @@ private:
     float m_parking_pos_retraction = 0.f;
     float m_extra_loading_move = 0.f;
     float m_bridging = 0.f;
+    bool m_no_sparse_layers = false;
     bool m_set_extruder_trimpot = false;
     bool m_adhesion = true;
     GCodeFlavor m_gcode_flavor;
@@ -46,9 +46,9 @@ public:
         if (indices.empty())
             clear();
         else {
-            // Allocate a next highest power of 2 nodes, because the incomplete binary tree will not have the leaves filled strictly from the left.
+            // Allocate enough memory for a full binary tree.
             m_nodes.assign(next_highest_power_of_2(indices.size() + 1), npos);
-            build_recursive(indices, 0, 0, 0, (int)(indices.size() - 1));
+            build_recursive(indices, 0, 0, 0, indices.size() - 1);
         }
         indices.clear();
     }
@@ -81,7 +81,7 @@ public:

 private:
     // Build a balanced tree by splitting the input sequence by an axis aligned plane at a dimension.
-    void build_recursive(std::vector<size_t> &input, size_t node, int dimension, int left, int right)
+    void build_recursive(std::vector<size_t> &input, size_t node, const size_t dimension, const size_t left, const size_t right)
     {
         if (left > right)
             return;
@@ -94,54 +94,56 @@ private:
             return;
         }

-        // Partition the input sequence to two equal halves.
-        int center = (left + right) >> 1;
+        // Partition the input to left / right pieces of the same length to produce a balanced tree.
+        size_t center = (left + right) / 2;
         partition_input(input, dimension, left, right, center);
         // Insert a node into the tree.
         m_nodes[node] = input[center];
-        // Partition the left and right subtrees.
-        size_t next_dimension = (++ dimension == NumDimensions) ? 0 : dimension;
-        build_recursive(input, (node << 1) + 1, next_dimension, left, center - 1);
-        build_recursive(input, (node << 1) + 2, next_dimension, center + 1, right);
+        // Build up the left / right subtrees.
+        size_t next_dimension = dimension;
+        if (++ next_dimension == NumDimensions)
+            next_dimension = 0;
+        if (center > left)
+            build_recursive(input, node * 2 + 1, next_dimension, left, center - 1);
+        build_recursive(input, node * 2 + 2, next_dimension, center + 1, right);
     }

-    // Partition the input m_nodes <left, right> at k using QuickSelect method.
+    // Partition the input m_nodes <left, right> at "k" and "dimension" using the QuickSelect method:
     // https://en.wikipedia.org/wiki/Quickselect
-    void partition_input(std::vector<size_t> &input, int dimension, int left, int right, int k) const
+    // Items left of the k'th item are lower than the k'th item in the "dimension",
+    // items right of the k'th item are higher than the k'th item in the "dimension",
+    void partition_input(std::vector<size_t> &input, const size_t dimension, size_t left, size_t right, const size_t k) const
     {
         while (left < right) {
-            // Guess the k'th element.
-            // Pick the pivot as a median of first, center and last value.
-            // Sort first, center and last values.
-            int center = (left + right) >> 1;
-            auto left_value = this->coordinate(input[left], dimension);
-            auto center_value = this->coordinate(input[center], dimension);
-            auto right_value = this->coordinate(input[right], dimension);
-            if (center_value < left_value) {
-                std::swap(input[left], input[center]);
-                std::swap(left_value, center_value);
-            }
-            if (right_value < left_value) {
-                std::swap(input[left], input[right]);
-                std::swap(left_value, right_value);
-            }
-            if (right_value < center_value) {
-                std::swap(input[center], input[right]);
-                // No need to do that, result is not used.
-                // std::swap(center_value, right_value);
-            }
+            size_t center = (left + right) / 2;
+            CoordType pivot;
+            {
+                // Bubble sort the input[left], input[center], input[right], so that a median of the three values
+                // will end up in input[center].
+                CoordType left_value = this->coordinate(input[left], dimension);
+                CoordType center_value = this->coordinate(input[center], dimension);
+                CoordType right_value = this->coordinate(input[right], dimension);
+                if (left_value > center_value) {
+                    std::swap(input[left], input[center]);
+                    std::swap(left_value, center_value);
+                }
+                if (left_value > right_value) {
+                    std::swap(input[left], input[right]);
+                    right_value = left_value;
+                }
+                if (center_value > right_value) {
+                    std::swap(input[center], input[right]);
+                    center_value = right_value;
+                }
+                pivot = center_value;
+            }
             // Only two or three values are left and those are sorted already.
-            if (left + 3 > right)
+            if (right <= left + 2)
+                // The <left, right> interval is already sorted.
                 break;
+            // left and right items are already at their correct positions.
+            // input[left].point[dimension] <= input[center].point[dimension] <= input[right].point[dimension]
             // Move the pivot to the (right - 1) position.
-            std::swap(input[center], input[right - 1]);
-            // Pivot value.
-            double pivot = this->coordinate(input[right - 1], dimension);
+            size_t i = left;
+            size_t j = right - 1;
+            std::swap(input[center], input[j]);
             // Partition the set based on the pivot.
-            int i = left;
-            int j = right - 1;
             for (;;) {
                 // Skip left points that are already at correct positions.
                 // Search will certainly stop at position (right - 1), which stores the pivot.
@@ -153,7 +155,7 @@ private:
                 std::swap(input[i], input[j]);
             }
             // Restore pivot to the center of the sequence.
-            std::swap(input[i], input[right]);
+            std::swap(input[i], input[right - 1]);
             // Which side the kth element is in?
             if (k < i)
                 right = i - 1;
@@ -173,7 +175,7 @@ private:
             return;

         // Left / right child node index.
-        size_t left = (node << 1) + 1;
+        size_t left = node * 2 + 1;
         size_t right = left + 1;
         unsigned int mask = visitor(m_nodes[node], dimension);
         if ((mask & (unsigned int)VisitorReturnMask::STOP) == 0) {
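partition_input() above is a textbook QuickSelect with a median-of-three pivot and sentinel-based partitioning; the rewrite mainly scopes the three-value bubble sort, fixes the pivot-restore index to (right - 1), and switches the indices to size_t. A self-contained sketch of the same technique over a plain vector (direct values instead of the tree's indirect indices; assumes k < v.size()):

```cpp
#include <algorithm>
#include <vector>

// After the call, v[k] is the k-th smallest element; everything left of k
// compares <= v[k] and everything right of k compares >= v[k].
static void quickselect(std::vector<int> &v, size_t k)
{
    if (v.size() < 2)
        return;
    size_t left = 0, right = v.size() - 1;
    while (left < right) {
        size_t center = (left + right) / 2;
        // Median of three: sort v[left], v[center], v[right] in place.
        if (v[left] > v[center])  std::swap(v[left], v[center]);
        if (v[left] > v[right])   std::swap(v[left], v[right]);
        if (v[center] > v[right]) std::swap(v[center], v[right]);
        if (right <= left + 2)
            break; // two or three elements are now fully sorted, v[k] is final
        // v[left] and v[right] now act as sentinels; park the pivot at right - 1.
        std::swap(v[center], v[right - 1]);
        int pivot = v[right - 1];
        size_t i = left, j = right - 1;
        for (;;) {
            while (v[++ i] < pivot) ; // stops at the pivot slot at the latest
            while (v[-- j] > pivot) ; // stops at the left sentinel at the latest
            if (i >= j)
                break;
            std::swap(v[i], v[j]);
        }
        std::swap(v[i], v[right - 1]); // pivot lands at its final position i
        if (k < i)
            right = i - 1;
        else if (k > i)
            left = i + 1;
        else
            return;
    }
}
```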
@@ -201,6 +201,7 @@ bool Print::invalidate_state_by_config_options(const std::vector<t_config_option
         || opt_key == "wipe_tower"
         || opt_key == "wipe_tower_width"
         || opt_key == "wipe_tower_bridging"
+        || opt_key == "wipe_tower_no_sparse_layers"
         || opt_key == "wiping_volumes_matrix"
         || opt_key == "parking_pos_retraction"
         || opt_key == "cooling_tube_retraction"
@@ -1837,6 +1837,14 @@ void PrintConfigDef::init_fff_params()
     def->mode = comAdvanced;
     def->set_default_value(new ConfigOptionBool(true));

+    def = this->add("wipe_tower_no_sparse_layers", coBool);
+    def->label = L("No sparse layers (EXPERIMENTAL)");
+    def->tooltip = L("If enabled, the wipe tower will not be printed on layers with no toolchanges. "
+                     "On layers with a toolchange, extruder will travel downward to print the wipe tower. "
+                     "User is responsible for ensuring there is no collision with the print.");
+    def->mode = comAdvanced;
+    def->set_default_value(new ConfigOptionBool(false));
+
     def = this->add("support_material", coBool);
     def->label = L("Generate support material");
     def->category = L("Support material");
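For reference, the new key travels through the config system like any other boolean and would appear as a 0/1 flag in an exported print profile. A hypothetical INI fragment, with the key name taken from the definition above and shown enabled alongside the existing wipe tower switch:

```ini
; Print Settings -> hypothetical exported profile snippet (advanced mode)
wipe_tower = 1
wipe_tower_no_sparse_layers = 1
```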
@@ -669,6 +669,7 @@ public:
     ConfigOptionStrings start_filament_gcode;
     ConfigOptionBool single_extruder_multi_material;
     ConfigOptionBool single_extruder_multi_material_priming;
+    ConfigOptionBool wipe_tower_no_sparse_layers;
     ConfigOptionString toolchange_gcode;
     ConfigOptionFloat travel_speed;
     ConfigOptionBool use_firmware_retraction;
@@ -739,6 +740,7 @@ protected:
         OPT_PTR(retract_speed);
         OPT_PTR(single_extruder_multi_material);
         OPT_PTR(single_extruder_multi_material_priming);
+        OPT_PTR(wipe_tower_no_sparse_layers);
         OPT_PTR(start_gcode);
         OPT_PTR(start_filament_gcode);
         OPT_PTR(toolchange_gcode);
@@ -237,11 +237,19 @@ std::vector<std::pair<size_t, bool>> chain_segments_greedy_constrained_reversals

     // Chain the end points: find (num_segments - 1) shortest links not forming bifurcations or loops.
     assert(num_segments >= 2);
+#ifndef NDEBUG
+    double distance_taken_last = 0.;
+#endif /* NDEBUG */
     for (int iter = int(num_segments) - 2;; -- iter) {
         assert(validate_graph_and_queue());
         // Take the first end point, for which the link points to the currently closest valid neighbor.
         EndPoint &end_point1 = *queue.top();
-        assert(end_point1.edge_out != nullptr);
+#ifndef NDEBUG
+        // Each edge added shall be longer than the previous one taken.
+        assert(end_point1.distance_out > distance_taken_last - SCALED_EPSILON);
+        distance_taken_last = end_point1.distance_out;
+#endif /* NDEBUG */
+        assert(end_point1.edge_out != nullptr);
         // No point on the queue may be connected yet.
         assert(end_point1.chain_id == 0);
         // Take the closest end point to the first end point,
@@ -313,6 +321,10 @@ std::vector<std::pair<size_t, bool>> chain_segments_greedy_constrained_reversals
         assert(next_idx < end_points.size());
         end_point1.edge_out = &end_points[next_idx];
         end_point1.distance_out = (end_points[next_idx].pos - end_point1.pos).squaredNorm();
+#ifndef NDEBUG
+        // Each edge shall be longer than the last one removed from the queue.
+        assert(end_point1.distance_out > distance_taken_last - SCALED_EPSILON);
+#endif /* NDEBUG */
         // Update position of this end point in the queue based on the distance calculated at the line above.
         queue.update(end_point1.heap_idx);
         //FIXME Remove the other end point from the KD tree.
@@ -460,18 +472,206 @@ std::vector<size_t> chain_points(const Points &points, Point *start_near)
     return out;
 }

+// Flip the sequences of polylines to lower the total length of connecting lines.
+// #define DEBUG_SVG_OUTPUT
+static inline void improve_ordering_by_segment_flipping(Polylines &polylines, bool fixed_start)
+{
+#ifndef NDEBUG
+    auto cost = [&polylines]() {
+        double sum = 0.;
+        for (size_t i = 1; i < polylines.size(); ++i)
+            sum += (polylines[i].first_point() - polylines[i - 1].last_point()).cast<double>().norm();
+        return sum;
+    };
+    double cost_initial = cost();
+
+    static int iRun = 0;
+    ++ iRun;
+    BoundingBox bbox = get_extents(polylines);
+#ifdef DEBUG_SVG_OUTPUT
+    {
+        SVG svg(debug_out_path("improve_ordering_by_segment_flipping-initial-%d.svg", iRun).c_str(), bbox);
+        svg.draw(polylines);
+        for (size_t i = 1; i < polylines.size(); ++ i)
+            svg.draw(Line(polylines[i - 1].last_point(), polylines[i].first_point()), "red");
+    }
+#endif /* DEBUG_SVG_OUTPUT */
+#endif /* NDEBUG */
+
+    struct Connection {
+        Connection(size_t heap_idx = std::numeric_limits<size_t>::max(), bool flipped = false) : heap_idx(heap_idx), flipped(flipped) {}
+        // Position of this object on MutablePriorityHeap.
+        size_t heap_idx;
+        // Is segment_idx flipped?
+        bool flipped;
+
+        double squaredNorm(const Polylines &polylines, const std::vector<Connection> &connections) const
+            { return ((this + 1)->start_point(polylines, connections) - this->end_point(polylines, connections)).squaredNorm(); }
+        double norm(const Polylines &polylines, const std::vector<Connection> &connections) const
+            { return sqrt(this->squaredNorm(polylines, connections)); }
+        double squaredNorm(const Polylines &polylines, const std::vector<Connection> &connections, bool try_flip1, bool try_flip2) const
+            { return ((this + 1)->start_point(polylines, connections, try_flip2) - this->end_point(polylines, connections, try_flip1)).squaredNorm(); }
+        double norm(const Polylines &polylines, const std::vector<Connection> &connections, bool try_flip1, bool try_flip2) const
+            { return sqrt(this->squaredNorm(polylines, connections, try_flip1, try_flip2)); }
+        Vec2d start_point(const Polylines &polylines, const std::vector<Connection> &connections, bool flip = false) const
+            { const Polyline &pl = polylines[this - connections.data()]; return ((this->flipped == flip) ? pl.points.front() : pl.points.back()).cast<double>(); }
+        Vec2d end_point(const Polylines &polylines, const std::vector<Connection> &connections, bool flip = false) const
+            { const Polyline &pl = polylines[this - connections.data()]; return ((this->flipped == flip) ? pl.points.back() : pl.points.front()).cast<double>(); }
+
+        bool in_queue() const { return this->heap_idx != std::numeric_limits<size_t>::max(); }
+        void flip() { this->flipped = ! this->flipped; }
+    };
+    std::vector<Connection> connections(polylines.size());
+
+#ifndef NDEBUG
+    auto cost_flipped = [fixed_start, &polylines, &connections]() {
+        assert(! fixed_start || ! connections.front().flipped);
+        double sum = 0.;
+        for (size_t i = 1; i < polylines.size(); ++ i)
+            sum += connections[i - 1].norm(polylines, connections);
+        return sum;
+    };
+    double cost_prev = cost_flipped();
+    assert(std::abs(cost_initial - cost_prev) < SCALED_EPSILON);
+
+    auto print_statistics = [&polylines, &connections]() {
+#if 0
+        for (size_t i = 1; i < polylines.size(); ++ i)
+            printf("Connecting %d with %d: Current length %lf flip(%d, %d), left flipped: %lf, right flipped: %lf, both flipped: %lf, \n",
+                int(i - 1), int(i),
+                unscale<double>(connections[i - 1].norm(polylines, connections)),
+                int(connections[i - 1].flipped), int(connections[i].flipped),
+                unscale<double>(connections[i - 1].norm(polylines, connections, true, false)),
+                unscale<double>(connections[i - 1].norm(polylines, connections, false, true)),
+                unscale<double>(connections[i - 1].norm(polylines, connections, true, true)));
+#endif
+    };
+    print_statistics();
+#endif /* NDEBUG */
+
+    // Initialize a MutablePriorityHeap of connections between polylines.
+    auto queue = make_mutable_priority_queue<Connection*>(
+        [](Connection *connection, size_t idx){ connection->heap_idx = idx; },
+        // Sort by decreasing connection distance.
+        [&polylines, &connections](Connection *l, Connection *r){ return l->squaredNorm(polylines, connections) > r->squaredNorm(polylines, connections); });
+    queue.reserve(polylines.size() - 1);
+    for (size_t i = 0; i + 1 < polylines.size(); ++ i)
+        queue.push(&connections[i]);
+
+    static constexpr size_t itercnt = 100;
+    size_t iter = 0;
+    for (; ! queue.empty() && iter < itercnt; ++ iter) {
+        Connection &connection = *queue.top();
+        queue.pop();
+        connection.heap_idx = std::numeric_limits<size_t>::max();
+        size_t idx_first = &connection - connections.data();
+        // Try to flip segments starting with idx_first + 1 to the end.
+        // Calculate the last segment to be flipped to improve the total path length.
+        double length_current = connection.norm(polylines, connections);
+        double length_flipped = connection.norm(polylines, connections, false, true);
+        int best_idx_forward = int(idx_first);
+        double best_improvement_forward = 0.;
+        for (size_t i = idx_first + 1; i + 1 < connections.size(); ++ i) {
+            length_current += connections[i].norm(polylines, connections);
+            double this_improvement = length_current - length_flipped - connections[i].norm(polylines, connections, true, false);
+            length_flipped += connections[i].norm(polylines, connections, true, true);
+            if (this_improvement > best_improvement_forward) {
+                best_improvement_forward = this_improvement;
+                best_idx_forward = int(i);
+            }
+            // if (length_flipped > 1.5 * length_current)
+            //     break;
+        }
+        if (length_current - length_flipped > best_improvement_forward)
+            // Best improvement by flipping up to the end.
+            best_idx_forward = int(connections.size()) - 1;
+        // Try to flip segments starting with idx_first - 1 to the start.
+        // Calculate the last segment to be flipped to improve the total path length.
+        length_current = connection.norm(polylines, connections);
+        length_flipped = connection.norm(polylines, connections, true, false);
+        int best_idx_backwards = int(idx_first);
+        double best_improvement_backwards = 0.;
+        for (int i = int(idx_first) - 1; i >= 0; -- i) {
+            length_current += connections[i].norm(polylines, connections);
+            double this_improvement = length_current - length_flipped - connections[i].norm(polylines, connections, false, true);
+            length_flipped += connections[i].norm(polylines, connections, true, true);
+            if (this_improvement > best_improvement_backwards) {
+                best_improvement_backwards = this_improvement;
+                best_idx_backwards = int(i);
+            }
+            // if (length_flipped > 1.5 * length_current)
+            //     break;
+        }
+        if (! fixed_start && length_current - length_flipped > best_improvement_backwards)
+            // Best improvement by flipping up to the start including the first polyline.
+            best_idx_backwards = -1;
+        int update_begin = int(idx_first);
+        int update_end = best_idx_forward;
+        if (best_improvement_backwards > 0. && best_improvement_backwards > best_improvement_forward) {
+            // Flip the sequence of polylines from idx_first to best_improvement_forward + 1.
+            update_begin = best_idx_backwards;
+            update_end = int(idx_first);
+        }
+        assert(update_begin <= update_end);
+        if (update_begin == update_end)
+            continue;
+        for (int i = update_begin + 1; i <= update_end; ++ i)
+            connections[i].flip();
+
+#ifndef NDEBUG
+        double cost = cost_flipped();
+        assert(cost < cost_prev);
+        cost_prev = cost;
+        print_statistics();
+#endif /* NDEBUG */
+
+        update_end = std::min(update_end + 1, int(connections.size()) - 1);
+        for (int i = std::max(0, update_begin); i < update_end; ++ i) {
+            Connection &c = connections[i];
+            if (c.in_queue())
+                queue.update(c.heap_idx);
+            else
+                queue.push(&c);
+        }
+    }
+
+    // Flip the segments based on the flip flag.
+    for (Polyline &pl : polylines)
+        if (connections[&pl - polylines.data()].flipped)
+            pl.reverse();
+
+#ifndef NDEBUG
+    double cost_final = cost();
+#ifdef DEBUG_SVG_OUTPUT
+    {
+        SVG svg(debug_out_path("improve_ordering_by_segment_flipping-final-%d.svg", iRun).c_str(), bbox);
+        svg.draw(polylines);
+        for (size_t i = 1; i < polylines.size(); ++ i)
+            svg.draw(Line(polylines[i - 1].last_point(), polylines[i].first_point()), "red");
+    }
+#endif /* DEBUG_SVG_OUTPUT */
+
+    assert(cost_final <= cost_prev);
+    assert(cost_final <= cost_initial);
+#endif /* NDEBUG */
+}
+
 Polylines chain_polylines(Polylines &&polylines, const Point *start_near)
 {
-    auto segment_end_point = [&polylines](size_t idx, bool first_point) -> const Point& { return first_point ? polylines[idx].first_point() : polylines[idx].last_point(); };
-    std::vector<std::pair<size_t, bool>> ordered = chain_segments_greedy<Point, decltype(segment_end_point)>(segment_end_point, polylines.size(), start_near);
     Polylines out;
-    out.reserve(polylines.size());
-    for (auto &segment_and_reversal : ordered) {
-        out.emplace_back(std::move(polylines[segment_and_reversal.first]));
-        if (segment_and_reversal.second)
-            out.back().reverse();
+    if (! polylines.empty()) {
+        auto segment_end_point = [&polylines](size_t idx, bool first_point) -> const Point& { return first_point ? polylines[idx].first_point() : polylines[idx].last_point(); };
+        std::vector<std::pair<size_t, bool>> ordered = chain_segments_greedy<Point, decltype(segment_end_point)>(segment_end_point, polylines.size(), start_near);
        out.reserve(polylines.size());
+        for (auto &segment_and_reversal : ordered) {
+            out.emplace_back(std::move(polylines[segment_and_reversal.first]));
+            if (segment_and_reversal.second)
+                out.back().reverse();
+        }
+        if (out.size() > 1)
+            improve_ordering_by_segment_flipping(out, start_near != nullptr);
     }
-    return out;
+    return out;
 }

 template<class T> static inline T chain_path_items(const Points &points, const T &items)
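improve_ordering_by_segment_flipping() keeps the greedy order from chain_segments_greedy() but re-orients whole runs of polylines when reversing them shortens the connecting travels. A toy version of the objective plus a deliberately simpler one-segment-at-a-time improver follows; the real code flips prefix/suffix runs driven by a mutable priority queue and respects fixed_start, so everything here is illustrative only:

```cpp
#include <cmath>
#include <utility>
#include <vector>

// A segment reduced to its endpoints, which is all the cost needs.
struct Seg {
    std::pair<double,double> a, b;
    bool flipped = false;
    const std::pair<double,double> &front() const { return flipped ? b : a; }
    const std::pair<double,double> &back()  const { return flipped ? a : b; }
};

static double gap(const std::pair<double,double> &p, const std::pair<double,double> &q)
{ return std::hypot(p.first - q.first, p.second - q.second); }

// The objective: total length of the connecting travels between neighbors.
static double total_connection_cost(const std::vector<Seg> &segs)
{
    double sum = 0.;
    for (size_t i = 1; i < segs.size(); ++ i)
        sum += gap(segs[i - 1].back(), segs[i].front());
    return sum;
}

// Flip single segments greedily until no flip helps. Each accepted flip
// strictly lowers the bounded-below cost, so this terminates.
static void flip_greedily(std::vector<Seg> &segs)
{
    for (bool improved = true; improved; ) {
        improved = false;
        for (size_t i = 0; i < segs.size(); ++ i) {
            double before = total_connection_cost(segs);
            segs[i].flipped = ! segs[i].flipped;
            if (total_connection_cost(segs) + 1e-12 < before)
                improved = true;
            else
                segs[i].flipped = ! segs[i].flipped; // undo, no gain
        }
    }
}
```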
@@ -272,27 +272,13 @@ void Bed3D::render(GLCanvas3D& canvas, float theta, float scale_factor) const

     switch (m_type)
     {
-    case MK2:
-    {
-        render_prusa(canvas, "mk2", theta > 90.0f);
-        break;
-    }
-    case MK3:
-    {
-        render_prusa(canvas, "mk3", theta > 90.0f);
-        break;
-    }
-    case SL1:
-    {
-        render_prusa(canvas, "sl1", theta > 90.0f);
-        break;
-    }
+    case MK2: { render_prusa(canvas, "mk2", theta > 90.0f); break; }
+    case MK3: { render_prusa(canvas, "mk3", theta > 90.0f); break; }
+    case SL1: { render_prusa(canvas, "sl1", theta > 90.0f); break; }
+    case MINI: { render_prusa(canvas, "mini", theta > 90.0f); break; }
+    case ENDER3: { render_prusa(canvas, "ender3", theta > 90.0f); break; }
     default:
-    case Custom:
-    {
-        render_custom(canvas, theta > 90.0f);
-        break;
-    }
+    case Custom: { render_custom(canvas, theta > 90.0f); break; }
     }
 }

@@ -364,22 +350,38 @@ Bed3D::EType Bed3D::detect_type(const Pointfs& shape) const
     {
         if (curr->config.has("bed_shape"))
         {
-            if ((curr->vendor != nullptr) && (curr->vendor->name == "Prusa Research") && (shape == dynamic_cast<const ConfigOptionPoints*>(curr->config.option("bed_shape"))->values))
+            if (curr->vendor != nullptr)
             {
-                if (boost::contains(curr->name, "SL1"))
+                if ((curr->vendor->name == "Prusa Research") && (shape == dynamic_cast<const ConfigOptionPoints*>(curr->config.option("bed_shape"))->values))
                 {
-                    type = SL1;
-                    break;
+                    if (boost::contains(curr->name, "SL1"))
+                    {
+                        type = SL1;
+                        break;
+                    }
+                    else if (boost::contains(curr->name, "MK3") || boost::contains(curr->name, "MK2.5"))
+                    {
+                        type = MK3;
+                        break;
+                    }
+                    else if (boost::contains(curr->name, "MK2"))
+                    {
+                        type = MK2;
+                        break;
+                    }
+                    else if (boost::contains(curr->name, "MINI"))
+                    {
+                        type = MINI;
+                        break;
+                    }
                 }
-                else if (boost::contains(curr->name, "MK3") || boost::contains(curr->name, "MK2.5"))
+                else if ((curr->vendor->name == "Creality") && (shape == dynamic_cast<const ConfigOptionPoints*>(curr->config.option("bed_shape"))->values))
                 {
-                    type = MK3;
-                    break;
-                }
-                else if (boost::contains(curr->name, "MK2"))
-                {
-                    type = MK2;
-                    break;
+                    if (boost::contains(curr->name, "ENDER-3"))
+                    {
+                        type = ENDER3;
+                        break;
+                    }
                 }
             }
         }
@@ -67,6 +67,8 @@ public:
     MK2,
     MK3,
     SL1,
+    MINI,
+    ENDER3,
     Custom,
     Num_Types
 };
@@ -166,6 +166,8 @@ PrinterPicker::PrinterPicker(wxWindow *parent, const VendorProfile &vendor, wxSt
     int max_row_width = 0;
     int current_row_width = 0;

+    bool is_variants = false;
+
     for (const auto &model : models) {
         if (! filter(model)) { continue; }

@@ -220,6 +222,7 @@ PrinterPicker::PrinterPicker(wxWindow *parent, const VendorProfile &vendor, wxSt
             auto *alt_label = new wxStaticText(variants_panel, wxID_ANY, _(L("Alternate nozzles:")));
             alt_label->SetFont(font_alt_nozzle);
             variants_sizer->Add(alt_label, 0, wxBOTTOM, 3);
+            is_variants = true;
         }

         auto *cbox = new Checkbox(variants_panel, label, model_id, variant.name);
@@ -280,10 +283,10 @@ PrinterPicker::PrinterPicker(wxWindow *parent, const VendorProfile &vendor, wxSt
     }
     title_sizer->AddStretchSpacer();

-    if (titles.size() > 1) {
+    if (/*titles.size() > 1*/is_variants) {
         // It only makes sense to add the All / None buttons if there's multiple printers

-        auto *sel_all_std = new wxButton(this, wxID_ANY, _(L("All standard")));
+        auto *sel_all_std = new wxButton(this, wxID_ANY, titles.size() > 1 ? _(L("All standard")) : _(L("Standard")));
         auto *sel_all = new wxButton(this, wxID_ANY, _(L("All")));
         auto *sel_none = new wxButton(this, wxID_ANY, _(L("None")));
         sel_all_std->Bind(wxEVT_BUTTON, [this](const wxCommandEvent &event) { this->select_all(true, false); });
@@ -403,7 +403,7 @@ const std::vector<std::string>& Preset::print_options()
         "top_infill_extrusion_width", "support_material_extrusion_width", "infill_overlap", "bridge_flow_ratio", "clip_multipart_objects",
         "elefant_foot_compensation", "xy_size_compensation", "threads", "resolution", "wipe_tower", "wipe_tower_x", "wipe_tower_y",
         "wipe_tower_width", "wipe_tower_rotation_angle", "wipe_tower_bridging", "single_extruder_multi_material_priming",
-        "compatible_printers", "compatible_printers_condition", "inherits"
+        "wipe_tower_no_sparse_layers", "compatible_printers", "compatible_printers_condition", "inherits"
     };
     return s_opts;
 }
@@ -1170,6 +1170,7 @@ void TabPrint::build()
     optgroup->append_single_option_line("wipe_tower_width");
     optgroup->append_single_option_line("wipe_tower_rotation_angle");
     optgroup->append_single_option_line("wipe_tower_bridging");
+    optgroup->append_single_option_line("wipe_tower_no_sparse_layers");
     optgroup->append_single_option_line("single_extruder_multi_material_priming");

     optgroup = page->new_optgroup(_(L("Advanced")));