Mirror of https://github.com/SoftFever/OrcaSlicer.git, synced 2025-10-24 09:11:23 -06:00
fixing optimizer and concurrency::reduce
This commit is contained in: parent c193d7c930, commit c10ff4f503
6 changed files with 142 additions and 41 deletions
@@ -1,7 +1,7 @@
 #ifndef BRUTEFORCEOPTIMIZER_HPP
 #define BRUTEFORCEOPTIMIZER_HPP
 
-#include <libslic3r/Optimize/NLoptOptimizer.hpp>
+#include <libslic3r/Optimize/Optimizer.hpp>
 
 namespace Slic3r { namespace opt {
 
@@ -24,19 +24,19 @@ struct AlgBurteForce {
     AlgBurteForce(const StopCriteria &cr, size_t gs): stc{cr}, gridsz{gs} {}
 
     template<int D, size_t N, class Fn, class Cmp>
-    void run(std::array<size_t, N> &idx,
+    bool run(std::array<size_t, N> &idx,
              Result<N> &result,
              const Bounds<N> &bounds,
              Fn &&fn,
              Cmp &&cmp)
     {
-        if (stc.stop_condition()) return;
+        if (stc.stop_condition()) return false;
 
         if constexpr (D < 0) {
             Input<N> inp;
 
             auto max_iter = stc.max_iterations();
-            if (max_iter && num_iter(idx, gridsz) >= max_iter) return;
+            if (max_iter && num_iter(idx, gridsz) >= max_iter) return false;
 
             for (size_t d = 0; d < N; ++d) {
                 const Bound &b = bounds[d];
@@ -46,17 +46,25 @@ struct AlgBurteForce {
 
             auto score = fn(inp);
             if (cmp(score, result.score)) {
+                double absdiff = std::abs(score - result.score);
+
                 result.score = score;
                 result.optimum = inp;
+
+                if (absdiff < stc.abs_score_diff() ||
+                    absdiff < stc.rel_score_diff() * std::abs(score))
+                    return false;
             }
 
         } else {
             for (size_t i = 0; i < gridsz; ++i) {
                 idx[D] = i;
-                run<D - 1>(idx, result, bounds, std::forward<Fn>(fn),
-                           std::forward<Cmp>(cmp));
+                if (!run<D - 1>(idx, result, bounds, std::forward<Fn>(fn),
+                                std::forward<Cmp>(cmp))) return false;
             }
         }
+
+        return true;
     }
 
     template<class Fn, size_t N>
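The run() change above turns the recursive grid walk into one that can abort early: any level that returns false unwinds the whole recursion instead of only skipping its own branch. A minimal standalone sketch of that pattern (toy code, not OrcaSlicer's AlgBurteForce; the 1e-9 threshold is a hypothetical stand-in for the StopCriteria score-difference checks):

#include <array>
#include <cmath>
#include <cstddef>
#include <cstdio>

// Recursive grid search in the style of AlgBurteForce::run(): D counts down
// one dimension per level, and a 'false' return propagates all the way up.
template<int D, std::size_t N, class Fn>
bool walk(std::array<std::size_t, N> &idx, std::size_t gridsz, double &best, Fn &&fn)
{
    if constexpr (D < 0) {
        double score = fn(idx);
        if (score < best) {
            double absdiff = std::abs(score - best);
            best = score;
            // Hypothetical convergence threshold standing in for
            // StopCriteria::abs_score_diff() / rel_score_diff().
            if (absdiff < 1e-9) return false; // converged: abort the whole walk
        }
        return true;
    } else {
        for (std::size_t i = 0; i < gridsz; ++i) {
            idx[D] = i;
            if (!walk<D - 1>(idx, gridsz, best, fn)) return false; // propagate the abort
        }
        return true;
    }
}

int main()
{
    std::array<std::size_t, 2> idx = {};
    double best = 1e30;
    walk<1>(idx, 10, best, [](const std::array<std::size_t, 2> &ix) {
        double x = ix[0] * 0.1, y = ix[1] * 0.1;
        return (x - 0.5) * (x - 0.5) + (y - 0.3) * (y - 0.3);
    });
    std::printf("best score: %g\n", best);
}

The same propagation is what the `if (!run<D - 1>(...)) return false;` lines in the hunk above implement.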
@@ -43,23 +43,36 @@ template<> struct _ccr<true>
         });
     }
 
-    template<class I, class Fn, class MergeFn, class T>
-    static T reduce(I from,
-                    I to,
-                    const T & init,
-                    Fn && fn,
-                    MergeFn &&mergefn,
-                    size_t granularity = 1)
+    template<class I, class MergeFn, class T, class AccessFn>
+    static T reduce(I from,
+                    I to,
+                    const T &init,
+                    MergeFn &&mergefn,
+                    AccessFn &&access,
+                    size_t granularity = 1
+                    )
     {
         return tbb::parallel_reduce(
             tbb::blocked_range{from, to, granularity}, init,
             [&](const auto &range, T subinit) {
                 T acc = subinit;
-                loop_(range, [&](auto &i) { acc = mergefn(acc, fn(i, acc)); });
+                loop_(range, [&](auto &i) { acc = mergefn(acc, access(i)); });
                 return acc;
             },
             std::forward<MergeFn>(mergefn));
     }
 
+    template<class I, class MergeFn, class T>
+    static IteratorOnly<I, T> reduce(I from,
+                                     I to,
+                                     const T & init,
+                                     MergeFn &&mergefn,
+                                     size_t granularity = 1)
+    {
+        return reduce(
+            from, to, init, std::forward<MergeFn>(mergefn),
+            [](typename I::value_type &i) { return i; }, granularity);
+    }
 };
 
 template<> struct _ccr<false>
@@ -92,18 +105,31 @@ public:
         loop_(from, to, std::forward<Fn>(fn));
     }
 
-    template<class I, class Fn, class MergeFn, class T>
-    static IntegerOnly<I, T> reduce(I from,
-                                    I to,
-                                    const T & init,
-                                    Fn && fn,
-                                    MergeFn &&mergefn,
-                                    size_t /*granularity*/ = 1)
+    template<class I, class MergeFn, class T, class AccessFn>
+    static T reduce(I from,
+                    I to,
+                    const T & init,
+                    MergeFn &&mergefn,
+                    AccessFn &&access,
+                    size_t /*granularity*/ = 1
+                    )
     {
         T acc = init;
-        loop_(from, to, [&](auto &i) { acc = mergefn(acc, fn(i, acc)); });
+        loop_(from, to, [&](auto &i) { acc = mergefn(acc, access(i)); });
         return acc;
     }
+
+    template<class I, class MergeFn, class T>
+    static IteratorOnly<I, T> reduce(I from,
+                                     I to,
+                                     const T &init,
+                                     MergeFn &&mergefn,
+                                     size_t /*granularity*/ = 1
+                                     )
+    {
+        return reduce(from, to, init, std::forward<MergeFn>(mergefn),
+                      [](typename I::value_type &i) { return i; });
+    }
 };
 
 using ccr = _ccr<USE_FULL_CONCURRENCY>;
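The reduce() refactor above splits the old fn(i, acc) callback into two pieces: accessfn maps an index (or iterator) to a value, and mergefn folds values together, which is also what tbb::parallel_reduce needs for its final join. A standalone usage sketch of that shape (toy code with a local stand-in for the sequential overload, not the real _ccr API):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in with the same (from, to, init, mergefn, accessfn, granularity)
// shape as the sequential _ccr<false>::reduce() in the diff above.
template<class I, class MergeFn, class T, class AccessFn>
T reduce(I from, I to, const T &init, MergeFn &&mergefn, AccessFn &&access,
         std::size_t /*granularity*/ = 1)
{
    T acc = init;
    for (I i = from; i < to; ++i) acc = mergefn(acc, access(i));
    return acc;
}

int main()
{
    std::vector<double> z = {3.0, -1.5, 2.25, 0.5};

    // How to combine two partial results.
    auto minfn    = [](double a, double b) { return std::min(a, b); };
    // How to read the value belonging to index i.
    auto accessfn = [&z](std::size_t i) { return z[i]; };

    double zmin = reduce(std::size_t(0), z.size(), z.front(), minfn, accessfn);
    std::cout << "zmin = " << zmin << "\n"; // prints -1.5
}

This is the same call pattern find_ground_level() switches to in the next file.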
@@ -31,7 +31,7 @@ VertexFaceMap create_vertex_face_map(const TriangleMesh &mesh) {
     return vmap;
 }
 
-// Find transformed mesh ground level without copy and with parallell reduce.
+// Find transformed mesh ground level without copy and with parallel reduce.
 double find_ground_level(const TriangleMesh &mesh,
                          const Transform3d & tr,
                          size_t threads)
@@ -40,15 +40,13 @@ double find_ground_level(const TriangleMesh &mesh,
 
     auto minfn = [](double a, double b) { return std::min(a, b); };
 
-    auto findminz = [&mesh, &tr] (size_t vi, double submin) {
-        Vec3d v = tr * mesh.its.vertices[vi].template cast<double>();
-        return std::min(submin, v.z());
+    auto accessfn = [&mesh, &tr] (size_t vi) {
+        return (tr * mesh.its.vertices[vi].template cast<double>()).z();
     };
 
     double zmin = mesh.its.vertices.front().z();
-
-    return ccr_par::reduce(size_t(0), vsize, zmin, findminz, minfn,
-                           vsize / threads);
+    size_t granularity = vsize / threads;
+    return ccr_par::reduce(size_t(0), vsize, zmin, minfn, accessfn, granularity);
 }
 
 // Try to guess the number of support points needed to support a mesh
@@ -65,7 +63,7 @@ double calculate_model_supportedness(const TriangleMesh & mesh,
 
     double zmin = find_ground_level(mesh, tr, Nthr);
 
-    auto score_mergefn = [&mesh, &tr, zmin](size_t fi, double subscore) {
+    auto accessfn = [&mesh, &tr, zmin](size_t fi) {
 
         static const Vec3d DOWN = {0., 0., -1.};
 
@@ -83,21 +81,18 @@ double calculate_model_supportedness(const TriangleMesh & mesh,
         double zlvl = zmin + 0.1;
         if (p1.z() <= zlvl && p2.z() <= zlvl && p3.z() <= zlvl) {
             // score += area * POINTS_PER_UNIT_AREA;
-            return subscore;
+            return 0.;
         }
 
         double phi = 1. - std::acos(N.dot(DOWN)) / PI;
-        phi = phi * (phi > 0.5);
+        // phi = phi * (phi > 0.5);
 
         // std::cout << "area: " << area << std::endl;
 
-        subscore += area * POINTS_PER_UNIT_AREA * phi;
-
-        return subscore;
+        return area * POINTS_PER_UNIT_AREA * phi;
     };
 
-    double score = ccr_seq::reduce(size_t(0), facesize, 0., score_mergefn,
-                                   std::plus<double>{}, facesize / Nthr);
+    double score = ccr_par::reduce(size_t(0), facesize, 0., std::plus<double>{}, accessfn, facesize / Nthr);
 
     return score / mesh.its.indices.size();
 }
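The per-face scoring above now fits the same access/merge split: accessfn returns each face's contribution (zero for faces resting on the bed, otherwise area weighted by phi = 1 - acos(N.dot(DOWN))/PI), and std::plus folds the contributions before the average. A self-contained toy version of that scoring (hypothetical Face struct and constants, not the Rotfinder code):

#include <cmath>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

// Toy stand-in for a mesh face: area, z component of its unit normal, and
// the lowest vertex z after the candidate rotation.
struct Face { double area, nz, minz; };

int main()
{
    const double PI = 3.141592653589793;
    const double POINTS_PER_UNIT_AREA = 1.;       // hypothetical constant
    std::vector<Face> faces = {{1.0, -1.0, 0.0},  // flat on the bed
                               {2.0, -0.7, 5.0},  // overhanging face
                               {1.5,  0.9, 3.0}}; // upward facing face
    double zmin = 0.0, zlvl = zmin + 0.1;

    auto accessfn = [&](std::size_t fi) {
        const Face &f = faces[fi];
        if (f.minz <= zlvl) return 0.;            // grounded faces need no support
        double phi = 1. - std::acos(-f.nz) / PI;  // N.dot(DOWN) with DOWN = (0, 0, -1)
        return f.area * POINTS_PER_UNIT_AREA * phi;
    };

    double score = 0.;
    for (std::size_t fi = 0; fi < faces.size(); ++fi)
        score = std::plus<double>{}(score, accessfn(fi));

    std::cout << score / faces.size() << "\n";    // averaged supportedness
}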
@@ -107,7 +102,7 @@ std::array<double, 2> find_best_rotation(const ModelObject& modelobj,
                                          std::function<void(unsigned)> statuscb,
                                          std::function<bool()> stopcond)
 {
-    static const unsigned MAX_TRIES = 100;
+    static const unsigned MAX_TRIES = 10000;
 
     // return value
     std::array<double, 2> rot;
@@ -158,10 +153,10 @@ std::array<double, 2> find_best_rotation(const ModelObject& modelobj,
                                     .max_iterations(max_tries)
                                     .rel_score_diff(1e-6)
                                     .stop_condition(stopcond),
-                                    10 /*grid size*/);
+                                    100 /*grid size*/);
 
-    // We are searching rotations around the three axes x, y, z. Thus the
-    // problem becomes a 3 dimensional optimization task.
+    // We are searching rotations around only two axes x, y. Thus the
+    // problem becomes a 2 dimensional optimization task.
     // We can specify the bounds for a dimension in the following way:
     auto b = opt::Bound{-PI, PI};
 
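Note on the two constants changed above: with the search reduced to two dimensions and a grid of 100 samples per axis, the brute-force solver visits up to 100 x 100 = 10 000 grid nodes, so the raised MAX_TRIES = 10000 presumably lets it cover the whole grid unless a stop criterion fires first.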