author    Noel Grandin <noel.grandin@collabora.co.uk>    2020-06-29 15:41:38 +0200
committer Noel Grandin <noel.grandin@collabora.co.uk>    2020-07-01 08:31:03 +0200
commit    bde0cc68bd6443c97b4ca061ed7132f478959281 (patch)
tree      0ae78dfc121e5e44b8aa2dda2031adacc021a9ab /svl
parent    2469ef45f1701f00b76f19666aa75fc8b6addcac (diff)
tdf#132454 some more improvements
Defer moving around the vector by tagging the last bit in the pointer.
This takes the undo operation from 10s to 8s on my machine.

Also re-organise the fields a little to improve packing.

Also add some design notes.

Change-Id: I38fa9156705c00bb9f64e2fc59ea862eba522942
Reviewed-on: https://gerrit.libreoffice.org/c/core/+/97424
Tested-by: Jenkins
Reviewed-by: Noel Grandin <noel.grandin@collabora.co.uk>
Diffstat (limited to 'svl')
-rw-r--r--  svl/source/notify/broadcast.cxx  93
1 file changed, 84 insertions(+), 9 deletions(-)
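
The "tagging the last bit in the pointer" in the commit message is low-bit pointer tagging: any object whose alignment is at least 2 lives at an even address, so the lowest address bit is free to carry a "deleted" flag. Below is a minimal standalone sketch of the idea, using std::uintptr_t and illustrative names (tagDeleted/isTaggedDeleted) rather than the SAL-specific types the patch itself uses:

#include <cassert>
#include <cstdint>

struct Listener { int mnId; }; // any type with alignof(T) >= 2 works

// Set the lowest address bit to mark the pointer as logically deleted.
static Listener* tagDeleted(Listener* p)
{
    return reinterpret_cast<Listener*>(reinterpret_cast<std::uintptr_t>(p) | 0x01);
}

static bool isTaggedDeleted(Listener* p)
{
    return (reinterpret_cast<std::uintptr_t>(p) & 0x01) != 0;
}

int main()
{
    static_assert(alignof(Listener) >= 2, "lowest bit must be unused");
    Listener aListener{42};
    Listener* p = &aListener;
    assert(!isTaggedDeleted(p));
    p = tagDeleted(p);
    assert(isTaggedDeleted(p));
}

Because tagging only adds 1 to an even address, a tagged entry still sorts between its own untagged value and the next distinct pointer, which is why the sorted-vector binary search in the patch keeps working.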
diff --git a/svl/source/notify/broadcast.cxx b/svl/source/notify/broadcast.cxx
index 5bd8aeef1628..586a09600ef3 100644
--- a/svl/source/notify/broadcast.cxx
+++ b/svl/source/notify/broadcast.cxx
@@ -24,6 +24,47 @@
#include <cassert>
#include <algorithm>
+/**
+ Design Notes
+ -------------------------------
+
+ This class is extremely heavily used - we can have millions of broadcasters and listeners and we can also
+ have a broadcaster that has a million listeners.
+
+ So we do a number of things:
+ (*) use a cache-dense listener list (std::vector) because caching effects dominate a lot of operations
+ (*) use a sorted list to speed up find operations
+ (*) only sort the list when we absolutely have to, to speed up sequential add/remove operations
+ (*) defer removing items from the list by (ab)using the last bit of the pointer
+
+ Also we have some complications around destruction because
+ (*) we broadcast a message to indicate that we are destructing
+ (*) which can trigger arbitrarily complicated behaviour, including
+ (*) adding and removing things from the in-destruction object!
+
+*/
+
+static bool isDeletedPtr(SvtListener* p)
+{
+ /** mark deleted entries by toggling the last bit, which is effectively unused, since the struct we point
+ * to is at least 16-bit aligned. This allows the binary search to continue working even when we have
+ * deleted entries */
+#if SAL_TYPES_SIZEOFPOINTER == 4
+ return (reinterpret_cast<sal_uInt32>(p) & 0x01) == 0x01;
+#else
+ return (reinterpret_cast<sal_uInt64>(p) & 0x01) == 0x01;
+#endif
+}
+
+static void markDeletedPtr(SvtListener*& rp)
+{
+#if SAL_TYPES_SIZEOFPOINTER == 4
+ reinterpret_cast<sal_uInt32&>(rp) |= 0x01;
+#else
+ reinterpret_cast<sal_uInt64&>(rp) |= 0x01;
+#endif
+}
+
static void sortListeners(std::vector<SvtListener*>& listeners, size_t firstUnsorted)
{
// Add() only appends new values, so often the container will be sorted except for one
@@ -51,7 +92,16 @@ static void sortListeners(std::vector<SvtListener*>& listeners, size_t firstUnsorted
void SvtBroadcaster::Normalize() const
{
- if (mnListenersFirstUnsorted != maListeners.size())
+ // clear empty slots first, because then we often have to do very little sorting
+ if (mnEmptySlots)
+ {
+ maListeners.erase(
+ std::remove_if(maListeners.begin(), maListeners.end(), [] (SvtListener* p) { return isDeletedPtr(p); }),
+ maListeners.end());
+ mnEmptySlots = 0;
+ }
+
+ if (mnListenersFirstUnsorted != static_cast<sal_Int32>(maListeners.size()))
{
sortListeners(maListeners, mnListenersFirstUnsorted);
mnListenersFirstUnsorted = maListeners.size();
@@ -71,8 +121,27 @@ void SvtBroadcaster::Add( SvtListener* p )
if (mbDisposing || mbAboutToDie)
return;
// Avoid normalizing if the item appended keeps the container sorted.
- if (maListeners.empty() || (mnListenersFirstUnsorted == maListeners.size() && maListeners.back() < p))
+ auto nRealSize = static_cast<sal_Int32>(maListeners.size() - mnEmptySlots);
+ auto bSorted = mnListenersFirstUnsorted == nRealSize;
+ if (maListeners.empty() || (bSorted && maListeners.back() <= p))
+ {
++mnListenersFirstUnsorted;
+ maListeners.push_back(p);
+ return;
+ }
+ // see if we can stuff it into an empty slot, which succeeds surprisingly often in
+ // some calc workloads where it removes and then re-adds the same listener
+ if (mnEmptySlots && bSorted)
+ {
+ auto it = std::lower_bound(maListeners.begin(), maListeners.end(), p);
+ if (it != maListeners.end() && isDeletedPtr(*it))
+ {
+ *it = p;
+ ++mnListenersFirstUnsorted;
+ --mnEmptySlots;
+ return;
+ }
+ }
maListeners.push_back(p);
}
@@ -90,30 +159,36 @@ void SvtBroadcaster::Remove( SvtListener* p )
return;
}
- Normalize();
+ // We only need to fully normalize if one or more Add()s have been performed that make the array unsorted.
+ auto nRealSize = static_cast<sal_Int32>(maListeners.size() - mnEmptySlots);
+ if (mnListenersFirstUnsorted != nRealSize || (maListeners.size() > 1024 && maListeners.size() / nRealSize > 16))
+ Normalize();
auto it = std::lower_bound(maListeners.begin(), maListeners.end(), p);
assert (it != maListeners.end() && *it == p);
if (it != maListeners.end() && *it == p)
{
- maListeners.erase(it);
+ markDeletedPtr(*it);
+ ++mnEmptySlots;
--mnListenersFirstUnsorted;
}
- if (maListeners.empty())
+ if (maListeners.size() - mnEmptySlots == 0)
ListenersGone();
}
SvtBroadcaster::SvtBroadcaster()
- : mbAboutToDie(false)
+ : mnEmptySlots(0)
+ , mnListenersFirstUnsorted(0)
+ , mbAboutToDie(false)
, mbDisposing(false)
, mbDestNormalized(true)
- , mnListenersFirstUnsorted(0)
{}
SvtBroadcaster::SvtBroadcaster( const SvtBroadcaster &rBC ) :
+ mnEmptySlots(0), mnListenersFirstUnsorted(0),
mbAboutToDie(false), mbDisposing(false),
- mbDestNormalized(true), mnListenersFirstUnsorted(0)
+ mbDestNormalized(true)
{
assert(!rBC.mbAboutToDie && "copying an object marked with PrepareForDestruction()?");
assert(!rBC.mbDisposing && "copying an object that is in its destructor?");
@@ -179,7 +254,7 @@ const SvtBroadcaster::ListenersType& SvtBroadcaster::GetAllListeners() const
bool SvtBroadcaster::HasListeners() const
{
- return !maListeners.empty();
+ return (maListeners.size() - mnEmptySlots) > 0;
}
void SvtBroadcaster::PrepareForDestruction()
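
Putting the pieces together, here is a condensed model of the deferred-removal scheme the patch implements: Remove() only tombstones an entry, Add() can recycle a tombstone found at the insertion point, and Normalize() compacts all tombstones in one erase/remove_if pass. Broadcaster and Listener here are hypothetical stand-ins, not the real SvtBroadcaster/SvtListener (the real class additionally tracks mnListenersFirstUnsorted and its dispose flags):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Listener { int mnId; };

static Listener* tagDeleted(Listener* p)
{
    return reinterpret_cast<Listener*>(reinterpret_cast<std::uintptr_t>(p) | 0x01);
}

static bool isTaggedDeleted(Listener* p)
{
    return (reinterpret_cast<std::uintptr_t>(p) & 0x01) != 0;
}

struct Broadcaster
{
    std::vector<Listener*> maListeners; // kept sorted by address
    int mnEmptySlots = 0;

    void Add(Listener* p)
    {
        auto it = std::lower_bound(maListeners.begin(), maListeners.end(), p);
        // Recycle a tombstone at the insertion point: the entry before it is
        // < p and the tombstone's pre-tag value was >= p, so overwriting it
        // with p keeps the vector sorted without shifting any elements.
        if (it != maListeners.end() && isTaggedDeleted(*it))
        {
            *it = p;
            --mnEmptySlots;
            return;
        }
        maListeners.insert(it, p);
    }

    void Remove(Listener* p)
    {
        // Binary search still works with tombstones present, so just mark
        // the slot instead of paying for vector::erase's element shifting.
        auto it = std::lower_bound(maListeners.begin(), maListeners.end(), p);
        if (it != maListeners.end() && *it == p)
        {
            *it = tagDeleted(*it);
            ++mnEmptySlots;
        }
    }

    void Normalize()
    {
        // One O(n) compaction pass replaces many O(n) vector::erase calls.
        maListeners.erase(
            std::remove_if(maListeners.begin(), maListeners.end(), isTaggedDeleted),
            maListeners.end());
        mnEmptySlots = 0;
    }
};

As in the patch, a real caller would run Normalize() lazily, e.g. before an operation that needs an exact view of the list, or once tombstones dominate the vector.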