summary | refs | log | tree | commitdiff
path: root/vcl
diff options
context:
space:
mode:
authorLuboš Luňák <l.lunak@collabora.com>2019-12-19 12:54:39 +0100
committerLuboš Luňák <l.lunak@collabora.com>2020-01-06 11:12:37 +0100
commit7702efe9ec6bbc428d89e83d957677cd3d323803 (patch)
treed19f05674c63ac1636d42165466cecaaeae7c9b3 /vcl
parentaab9dafdae868b9e55c143cdc08598895f19f63d (diff)
use copy-on-write for SkiaSalBitmap data
E.g. scaling works by first making a copy using Create() and then Scale() is called for the copy. This means there was a needless data copy for mBuffer in Create(). Also make sure SkBitmap is properly unshared if needed, as it seems copies of it always share the pixels.

Change-Id: I30a758c84e7218e9afe516477aa8429364ef8607
Reviewed-on: https://gerrit.libreoffice.org/c/core/+/85543
Tested-by: Jenkins
Reviewed-by: Luboš Luňák <l.lunak@collabora.com>
Diffstat (limited to 'vcl')
-rw-r--r--vcl/inc/skia/salbmp.hxx7
-rw-r--r--vcl/skia/salbmp.cxx70
2 files changed, 53 insertions, 24 deletions
diff --git a/vcl/inc/skia/salbmp.hxx b/vcl/inc/skia/salbmp.hxx
index a0fa2d900c78..78b864104a2c 100644
--- a/vcl/inc/skia/salbmp.hxx
+++ b/vcl/inc/skia/salbmp.hxx
@@ -74,6 +74,9 @@ private:
// if necessary).
void EnsureBitmapData();
void EnsureBitmapData() const { return const_cast<SkiaSalBitmap*>(this)->EnsureBitmapData(); }
+ // Like EnsureBitmapData(), but will also make any shared data unique.
+ // Call before changing the data.
+ void EnsureBitmapUniqueData();
// Allocate mBitmap or mBuffer (with uninitialized contents).
bool CreateBitmapData();
SkBitmap GetAsSkBitmap() const;
@@ -90,7 +93,7 @@ private:
// B - has SkBitmap, D - has data buffer, I/i - has SkImage (on GPU/CPU),
// A/a - has alpha SkImage (on GPU/CPU)
return stream << static_cast<const void*>(bitmap) << " " << bitmap->GetSize() << "/"
- << bitmap->mBitCount << (!bitmap->mBitmap.drawsNothing() ? "B" : "")
+ << bitmap->mBitCount << (!bitmap->mBitmap.isNull() ? "B" : "")
<< (bitmap->mBuffer.get() ? "D" : "")
<< (bitmap->mImage ? (bitmap->mImage->isTextureBacked() ? "I" : "i") : "")
<< (bitmap->mAlphaImage ? (bitmap->mAlphaImage->isTextureBacked() ? "A" : "a")
@@ -111,7 +114,7 @@ private:
// mBitmap/mBuffer must be filled from it on demand if necessary by EnsureBitmapData().
SkBitmap mBitmap;
sk_sp<SkImage> mImage; // possibly GPU-backed
- std::unique_ptr<sal_uInt8[]> mBuffer;
+ std::shared_ptr<sal_uInt8[]> mBuffer;
int mScanlineSize; // size of one row in mBuffer
sk_sp<SkImage> mAlphaImage; // cached contents as alpha image, possibly GPU-backed
#ifdef DBG_UTIL
diff --git a/vcl/skia/salbmp.cxx b/vcl/skia/salbmp.cxx
index 9bc2d0ecf217..781d6f87c9ad 100644
--- a/vcl/skia/salbmp.cxx
+++ b/vcl/skia/salbmp.cxx
@@ -174,24 +174,16 @@ bool SkiaSalBitmap::Create(const SalBitmap& rSalBmp, sal_uInt16 nNewBitCount)
const SkiaSalBitmap& src = static_cast<const SkiaSalBitmap&>(rSalBmp);
if (nNewBitCount == src.GetBitCount())
{
- mBitmap = src.mBitmap; // TODO unshare?
- mImage = src.mImage; // TODO unshare?
- mAlphaImage = src.mAlphaImage; // TODO unshare?
+ mBitmap = src.mBitmap;
+ // SkBitmap shares pixels on copy.
+ assert(mBitmap.getPixels() == src.mBitmap.getPixels());
+ mImage = src.mImage;
+ mAlphaImage = src.mAlphaImage;
+ mBuffer = src.mBuffer;
mPalette = src.mPalette;
mBitCount = src.mBitCount;
mSize = src.mSize;
- if (src.mBuffer != nullptr)
- {
- sal_uInt32 allocate = src.mScanlineSize * src.mSize.Height();
-#ifdef DBG_UTIL
- assert(memcmp(src.mBuffer.get() + allocate, CANARY, sizeof(CANARY)) == 0);
- allocate += sizeof(CANARY);
-#endif
- sal_uInt8* newBuffer = new sal_uInt8[allocate];
- memcpy(newBuffer, src.mBuffer.get(), allocate);
- mBuffer.reset(newBuffer);
- mScanlineSize = src.mScanlineSize;
- }
+ mScanlineSize = src.mScanlineSize;
#ifdef DBG_UTIL
mWriteAccessCount = 0;
#endif
@@ -228,11 +220,18 @@ sal_uInt16 SkiaSalBitmap::GetBitCount() const { return mBitCount; }
BitmapBuffer* SkiaSalBitmap::AcquireBuffer(BitmapAccessMode nMode)
{
-#ifndef DBG_UTIL
- (void)nMode; // TODO
-#endif
- EnsureBitmapData();
- if (mBitmap.drawsNothing() && !mBuffer)
+ switch (nMode)
+ {
+ case BitmapAccessMode::Write:
+ EnsureBitmapUniqueData();
+ break;
+ case BitmapAccessMode::Read:
+ EnsureBitmapData();
+ break;
+ default:
+ break;
+ }
+ if (mBitmap.isNull() && !mBuffer)
return nullptr;
#ifdef DBG_UTIL
// BitmapWriteAccess stores also a copy of the palette and it can
@@ -358,7 +357,7 @@ SkBitmap SkiaSalBitmap::GetAsSkBitmap() const
assert(mWriteAccessCount == 0);
#endif
EnsureBitmapData();
- if (!mBitmap.drawsNothing())
+ if (!mBitmap.isNull())
return mBitmap;
SkBitmap bitmap;
if (mBuffer)
@@ -495,7 +494,7 @@ const sk_sp<SkImage>& SkiaSalBitmap::GetAlphaSkImage() const
void SkiaSalBitmap::EnsureBitmapData()
{
- if (!mBitmap.drawsNothing() || mBuffer)
+ if (!mBitmap.isNull() || mBuffer)
return;
if (!mImage)
return;
@@ -559,6 +558,33 @@ void SkiaSalBitmap::EnsureBitmapData()
}
}
+void SkiaSalBitmap::EnsureBitmapUniqueData()
+{
+ EnsureBitmapData();
+ // TODO threads?
+ if (mBitmap.pixelRef() && !mBitmap.pixelRef()->unique())
+ {
+ // SkBitmap copies share pixels, so make a deep copy.
+ SkBitmap newBitmap;
+ if (!newBitmap.tryAllocPixels(mBitmap.info()))
+ abort();
+ newBitmap.writePixels(mBitmap.pixmap());
+ assert(newBitmap.getPixels() != mBitmap.getPixels());
+ mBitmap = newBitmap;
+ }
+ if (mBuffer.use_count() > 1)
+ {
+ sal_uInt32 allocate = mScanlineSize * mSize.Height();
+#ifdef DBG_UTIL
+ assert(memcmp(mBuffer.get() + allocate, CANARY, sizeof(CANARY)) == 0);
+ allocate += sizeof(CANARY);
+#endif
+ sal_uInt8* newBuffer = new sal_uInt8[allocate];
+ memcpy(newBuffer, mBuffer.get(), allocate);
+ mBuffer.reset(newBuffer);
+ }
+}
+
void SkiaSalBitmap::ResetSkImages()
{
mAlphaImage.reset();