summaryrefslogtreecommitdiff
path: root/sc
diff options
context:
space:
mode:
authorDennis Francis <dennis.francis@collabora.com>2019-07-09 23:07:23 +0530
committerDennis Francis <dennis.francis@collabora.com>2019-10-01 18:11:36 +0200
commit46d0afba738d8ee7c9b63384fef513f42ee587f3 (patch)
treebc4f6927f56974f6fc3cdb6224437e01a01e372c /sc
parent845e1cdca3349c72e3083186502285d5b776abbe (diff)
Implement parallel version of super-scalar-sample-sort...
and use it for the pivot table construction routine processBuckets(). The implementation uses ideas from the non-parallel sample sort discussed in the paper below, but parallelizes both the "binning"/"classification" operations and the sorting of the bins themselves. Sanders, Peter, and Sebastian Winkel. "Super scalar sample sort." European Symposium on Algorithms. Springer, Berlin, Heidelberg, 2004. The paper can be accessed at: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.366&rep=rep1&type=pdf Change-Id: I3723b87e2feb8d7d9ee03f71f6025e26add914ce Reviewed-on: https://gerrit.libreoffice.org/79486 Tested-by: Jenkins Reviewed-by: Luboš Luňák <l.lunak@collabora.com>
Diffstat (limited to 'sc')
-rw-r--r--sc/source/core/data/dpcache.cxx8
1 file changed, 5 insertions, 3 deletions
diff --git a/sc/source/core/data/dpcache.cxx b/sc/source/core/data/dpcache.cxx
index cf7eaff0d53d..8a834614f94e 100644
--- a/sc/source/core/data/dpcache.cxx
+++ b/sc/source/core/data/dpcache.cxx
@@ -32,6 +32,7 @@
#include <columniterator.hxx>
#include <cellvalue.hxx>
+#include <comphelper/parallelsort.hxx>
#include <rtl/math.hxx>
#include <unotools/charclass.hxx>
#include <unotools/textsearch.hxx>
@@ -171,6 +172,7 @@ struct Bucket
ScDPItemData maValue;
SCROW mnOrderIndex;
SCROW mnDataIndex;
+ Bucket() {}
Bucket(const ScDPItemData& rValue, SCROW nData) :
maValue(rValue), mnOrderIndex(0), mnDataIndex(nData) {}
};
@@ -250,7 +252,7 @@ void processBuckets(std::vector<Bucket>& aBuckets, ScDPCache::Field& rField)
return;
// Sort by the value.
- std::sort(aBuckets.begin(), aBuckets.end(), LessByValue());
+ comphelper::parallelSort(aBuckets.begin(), aBuckets.end(), LessByValue());
{
// Set order index such that unique values have identical index value.
@@ -269,14 +271,14 @@ void processBuckets(std::vector<Bucket>& aBuckets, ScDPCache::Field& rField)
}
// Re-sort the bucket this time by the data index.
- std::sort(aBuckets.begin(), aBuckets.end(), LessByDataIndex());
+ comphelper::parallelSort(aBuckets.begin(), aBuckets.end(), LessByDataIndex());
// Copy the order index series into the field object.
rField.maData.reserve(aBuckets.size());
std::for_each(aBuckets.begin(), aBuckets.end(), PushBackOrderIndex(rField.maData));
// Sort by the value again.
- std::sort(aBuckets.begin(), aBuckets.end(), LessByOrderIndex());
+ comphelper::parallelSort(aBuckets.begin(), aBuckets.end(), LessByOrderIndex());
// Unique by value.
std::vector<Bucket>::iterator itUniqueEnd =