[Impeller] remove Buffer type and associated abstractions. (flutter/engine#49702)

Now that the HostBuffer isn't implementing Buffer, there is really no reason to have a Buffer/DeviceBuffer split. Make BufferViews use a DeviceBuffer, and remove the data ptr from buffer views (as we can always reach into the device buffer).

Makes some other cleanups too. Since we always use a std::shared_ptr&lt;DeviceBuffer&gt;, we can eliminate the shared_from_this call for AsBufferView by making it a static method.
This commit is contained in:
Jonah Williams 2024-01-12 12:22:11 -08:00 committed by GitHub
parent 0d8738a2fa
commit 06ee89691e
32 changed files with 195 additions and 258 deletions

View File

@ -5050,8 +5050,6 @@ ORIGIN: ../../../flutter/impeller/compiler/utilities.cc + ../../../flutter/LICEN
ORIGIN: ../../../flutter/impeller/compiler/utilities.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/allocator.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/allocator.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/buffer.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/buffer.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/buffer_view.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/buffer_view.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/core/capture.cc + ../../../flutter/LICENSE
@ -7877,8 +7875,6 @@ FILE: ../../../flutter/impeller/compiler/utilities.cc
FILE: ../../../flutter/impeller/compiler/utilities.h
FILE: ../../../flutter/impeller/core/allocator.cc
FILE: ../../../flutter/impeller/core/allocator.h
FILE: ../../../flutter/impeller/core/buffer.cc
FILE: ../../../flutter/impeller/core/buffer.h
FILE: ../../../flutter/impeller/core/buffer_view.cc
FILE: ../../../flutter/impeller/core/buffer_view.h
FILE: ../../../flutter/impeller/core/capture.cc

View File

@ -5,8 +5,6 @@
#ifndef FLUTTER_IMPELLER_BASE_BACKEND_CAST_H_
#define FLUTTER_IMPELLER_BASE_BACKEND_CAST_H_
#include "flutter/fml/macros.h"
namespace impeller {
template <class Sub, class Base>

View File

@ -8,8 +8,6 @@ impeller_component("core") {
sources = [
"allocator.cc",
"allocator.h",
"buffer.cc",
"buffer.h",
"buffer_view.cc",
"buffer_view.h",
"capture.cc",

View File

@ -1,11 +0,0 @@
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "impeller/core/buffer.h"
namespace impeller {
Buffer::~Buffer() = default;
} // namespace impeller

View File

@ -1,24 +0,0 @@
// Copyright 2013 The Flutter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef FLUTTER_IMPELLER_CORE_BUFFER_H_
#define FLUTTER_IMPELLER_CORE_BUFFER_H_
#include <memory>
namespace impeller {
class DeviceBuffer;
class Allocator;
class Buffer {
public:
virtual ~Buffer();
virtual std::shared_ptr<const DeviceBuffer> GetDeviceBuffer() const = 0;
};
} // namespace impeller
#endif // FLUTTER_IMPELLER_CORE_BUFFER_H_

View File

@ -5,14 +5,15 @@
#ifndef FLUTTER_IMPELLER_CORE_BUFFER_VIEW_H_
#define FLUTTER_IMPELLER_CORE_BUFFER_VIEW_H_
#include "impeller/core/buffer.h"
#include <memory>
#include "impeller/core/range.h"
namespace impeller {
class DeviceBuffer;
struct BufferView {
std::shared_ptr<const Buffer> buffer;
uint8_t* contents;
std::shared_ptr<const DeviceBuffer> buffer;
Range range;
constexpr explicit operator bool() const { return static_cast<bool>(buffer); }

View File

@ -10,18 +10,13 @@ DeviceBuffer::DeviceBuffer(DeviceBufferDescriptor desc) : desc_(desc) {}
DeviceBuffer::~DeviceBuffer() = default;
// |Buffer|
std::shared_ptr<const DeviceBuffer> DeviceBuffer::GetDeviceBuffer() const {
return shared_from_this();
}
void DeviceBuffer::Flush(std::optional<Range> range) const {}
BufferView DeviceBuffer::AsBufferView() const {
// static
BufferView DeviceBuffer::AsBufferView(std::shared_ptr<DeviceBuffer> buffer) {
BufferView view;
view.buffer = shared_from_this();
view.contents = OnGetContents();
view.range = {0u, desc_.size};
view.buffer = std::move(buffer);
view.range = {0u, view.buffer->desc_.size};
return view;
}

View File

@ -9,7 +9,6 @@
#include <string>
#include "impeller/core/allocator.h"
#include "impeller/core/buffer.h"
#include "impeller/core/buffer_view.h"
#include "impeller/core/device_buffer_descriptor.h"
#include "impeller/core/range.h"
@ -17,8 +16,7 @@
namespace impeller {
class DeviceBuffer : public Buffer,
public std::enable_shared_from_this<DeviceBuffer> {
class DeviceBuffer {
public:
virtual ~DeviceBuffer();
@ -30,16 +28,14 @@ class DeviceBuffer : public Buffer,
virtual bool SetLabel(const std::string& label, Range range) = 0;
BufferView AsBufferView() const;
/// @brief Create a buffer view of this entire buffer.
static BufferView AsBufferView(std::shared_ptr<DeviceBuffer> buffer);
virtual std::shared_ptr<Texture> AsTexture(
Allocator& allocator,
const TextureDescriptor& descriptor,
uint16_t row_bytes) const;
// |Buffer|
std::shared_ptr<const DeviceBuffer> GetDeviceBuffer() const;
const DeviceBufferDescriptor& GetDeviceBufferDescriptor() const;
virtual uint8_t* OnGetContents() const = 0;

View File

@ -41,29 +41,29 @@ void HostBuffer::SetLabel(std::string label) {
BufferView HostBuffer::Emplace(const void* buffer,
size_t length,
size_t align) {
auto [data, range, device_buffer] = EmplaceInternal(buffer, length, align);
auto [range, device_buffer] = EmplaceInternal(buffer, length, align);
if (!device_buffer) {
return {};
}
return BufferView{std::move(device_buffer), data, range};
return BufferView{std::move(device_buffer), range};
}
BufferView HostBuffer::Emplace(const void* buffer, size_t length) {
auto [data, range, device_buffer] = EmplaceInternal(buffer, length);
auto [range, device_buffer] = EmplaceInternal(buffer, length);
if (!device_buffer) {
return {};
}
return BufferView{std::move(device_buffer), data, range};
return BufferView{std::move(device_buffer), range};
}
BufferView HostBuffer::Emplace(size_t length,
size_t align,
const EmplaceProc& cb) {
auto [data, range, device_buffer] = EmplaceInternal(length, align, cb);
auto [range, device_buffer] = EmplaceInternal(length, align, cb);
if (!device_buffer) {
return {};
}
return BufferView{std::move(device_buffer), data, range};
return BufferView{std::move(device_buffer), range};
}
HostBuffer::TestStateQuery HostBuffer::GetStateForTest() {
@ -74,10 +74,9 @@ HostBuffer::TestStateQuery HostBuffer::GetStateForTest() {
};
}
void HostBuffer::MaybeCreateNewBuffer(size_t required_size) {
void HostBuffer::MaybeCreateNewBuffer() {
current_buffer_++;
if (current_buffer_ >= device_buffers_[frame_index_].size()) {
FML_DCHECK(required_size <= kAllocatorBlockSize);
DeviceBufferDescriptor desc;
desc.size = kAllocatorBlockSize;
desc.storage_mode = StorageMode::kHostVisible;
@ -86,10 +85,10 @@ void HostBuffer::MaybeCreateNewBuffer(size_t required_size) {
offset_ = 0;
}
std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
HostBuffer::EmplaceInternal(size_t length,
size_t align,
const EmplaceProc& cb) {
std::tuple<Range, std::shared_ptr<DeviceBuffer>> HostBuffer::EmplaceInternal(
size_t length,
size_t align,
const EmplaceProc& cb) {
if (!cb) {
return {};
}
@ -108,28 +107,27 @@ HostBuffer::EmplaceInternal(size_t length,
cb(device_buffer->OnGetContents());
device_buffer->Flush(Range{0, length});
}
return std::make_tuple(device_buffer->OnGetContents(), Range{0, length},
device_buffer);
return std::make_tuple(Range{0, length}, device_buffer);
}
auto old_length = GetLength();
if (old_length + length > kAllocatorBlockSize) {
MaybeCreateNewBuffer(length);
MaybeCreateNewBuffer();
}
old_length = GetLength();
auto current_buffer = GetCurrentBuffer();
cb(current_buffer->OnGetContents() + old_length);
auto contents = current_buffer->OnGetContents();
cb(contents + old_length);
current_buffer->Flush(Range{old_length, length});
offset_ += length;
auto contents = current_buffer->OnGetContents();
return std::make_tuple(contents, Range{old_length, length},
std::move(current_buffer));
return std::make_tuple(Range{old_length, length}, std::move(current_buffer));
}
std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
HostBuffer::EmplaceInternal(const void* buffer, size_t length) {
std::tuple<Range, std::shared_ptr<DeviceBuffer>> HostBuffer::EmplaceInternal(
const void* buffer,
size_t length) {
// If the requested allocation is bigger than the block size, create a one-off
// device buffer and write to that.
if (length > kAllocatorBlockSize) {
@ -146,38 +144,37 @@ HostBuffer::EmplaceInternal(const void* buffer, size_t length) {
return {};
}
}
return std::make_tuple(device_buffer->OnGetContents(), Range{0, length},
device_buffer);
return std::make_tuple(Range{0, length}, device_buffer);
}
auto old_length = GetLength();
if (old_length + length > kAllocatorBlockSize) {
MaybeCreateNewBuffer(length);
MaybeCreateNewBuffer();
}
old_length = GetLength();
auto current_buffer = GetCurrentBuffer();
auto contents = current_buffer->OnGetContents();
if (buffer) {
::memmove(current_buffer->OnGetContents() + old_length, buffer, length);
::memmove(contents + old_length, buffer, length);
current_buffer->Flush(Range{old_length, length});
}
offset_ += length;
auto contents = current_buffer->OnGetContents();
return std::make_tuple(contents, Range{old_length, length},
std::move(current_buffer));
return std::make_tuple(Range{old_length, length}, std::move(current_buffer));
}
std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
std::tuple<Range, std::shared_ptr<DeviceBuffer>>
HostBuffer::EmplaceInternal(const void* buffer, size_t length, size_t align) {
if (align == 0 || (GetLength() % align) == 0) {
return EmplaceInternal(buffer, length);
}
{
auto [buffer, range, device_buffer] =
EmplaceInternal(nullptr, align - (GetLength() % align));
if (!buffer) {
return {};
auto padding = align - (GetLength() % align);
if (offset_ + padding < kAllocatorBlockSize) {
offset_ += padding;
} else {
MaybeCreateNewBuffer();
}
}

View File

@ -12,7 +12,7 @@
#include <string>
#include <type_traits>
#include "impeller/core/buffer.h"
#include "impeller/core/allocator.h"
#include "impeller/core/buffer_view.h"
#include "impeller/core/platform.h"
@ -134,18 +134,18 @@ class HostBuffer {
TestStateQuery GetStateForTest();
private:
[[nodiscard]] std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
[[nodiscard]] std::tuple<Range, std::shared_ptr<DeviceBuffer>>
EmplaceInternal(const void* buffer, size_t length);
std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
std::tuple<Range, std::shared_ptr<DeviceBuffer>>
EmplaceInternal(size_t length, size_t align, const EmplaceProc& cb);
std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
std::tuple<Range, std::shared_ptr<DeviceBuffer>>
EmplaceInternal(const void* buffer, size_t length, size_t align);
size_t GetLength() const { return offset_; }
void MaybeCreateNewBuffer(size_t required_size);
void MaybeCreateNewBuffer();
std::shared_ptr<DeviceBuffer>& GetCurrentBuffer() {
return device_buffers_[frame_index_][current_buffer_];

View File

@ -21,8 +21,8 @@ typename T::VertInfo* GetVertInfo(const Command& command) {
return nullptr;
}
auto data =
(resource->view.resource.contents + resource->view.resource.range.offset);
auto data = (resource->view.resource.buffer->OnGetContents() +
resource->view.resource.range.offset);
return reinterpret_cast<typename T::VertInfo*>(data);
}
@ -39,8 +39,8 @@ typename T::FragInfo* GetFragInfo(const Command& command) {
return nullptr;
}
auto data =
(resource->view.resource.contents + resource->view.resource.range.offset);
auto data = (resource->view.resource.buffer->OnGetContents() +
resource->view.resource.range.offset);
return reinterpret_cast<typename T::FragInfo*>(data);
}

View File

@ -162,10 +162,8 @@ GeometryResult PointFieldGeometry::GetPositionBufferGPU(
buffer_desc.size = total * sizeof(Point);
buffer_desc.storage_mode = StorageMode::kDevicePrivate;
auto geometry_buffer = renderer.GetContext()
->GetResourceAllocator()
->CreateBuffer(buffer_desc)
->AsBufferView();
auto geometry_buffer = DeviceBuffer::AsBufferView(
renderer.GetContext()->GetResourceAllocator()->CreateBuffer(buffer_desc));
BufferView output;
{
@ -197,10 +195,9 @@ GeometryResult PointFieldGeometry::GetPositionBufferGPU(
buffer_desc.size = total * sizeof(Vector4);
buffer_desc.storage_mode = StorageMode::kDevicePrivate;
auto geometry_uv_buffer = renderer.GetContext()
->GetResourceAllocator()
->CreateBuffer(buffer_desc)
->AsBufferView();
auto geometry_uv_buffer = DeviceBuffer::AsBufferView(
renderer.GetContext()->GetResourceAllocator()->CreateBuffer(
buffer_desc));
using UV = UvComputeShader;

View File

@ -422,7 +422,7 @@ static std::shared_ptr<Texture> CreateTextureForDecompressedImage(
return nullptr;
}
blit_pass->SetLabel("Mipmap Blit Pass");
blit_pass->AddCopy(buffer->AsBufferView(), dest_texture);
blit_pass->AddCopy(DeviceBuffer::AsBufferView(buffer), dest_texture);
if (enable_mipmapping) {
blit_pass->GenerateMipmap(dest_texture);
}

View File

@ -232,7 +232,7 @@ bool BufferBindingsGLES::BindUniformBuffer(const ProcTableGLES& gl,
Allocator& transients_allocator,
const BufferResource& buffer) {
const auto* metadata = buffer.GetMetadata();
auto device_buffer = buffer.resource.buffer->GetDeviceBuffer();
auto device_buffer = buffer.resource.buffer;
if (!device_buffer) {
VALIDATION_LOG << "Device buffer not found.";
return false;

View File

@ -386,7 +386,7 @@ struct RenderPassData {
return false;
}
auto vertex_buffer = vertex_buffer_view.buffer->GetDeviceBuffer();
auto vertex_buffer = vertex_buffer_view.buffer;
if (!vertex_buffer) {
return false;
@ -445,7 +445,7 @@ struct RenderPassData {
} else {
// Bind the index buffer if necessary.
auto index_buffer_view = command.vertex_buffer.index_buffer;
auto index_buffer = index_buffer_view.buffer->GetDeviceBuffer();
auto index_buffer = index_buffer_view.buffer;
const auto& index_buffer_gles = DeviceBufferGLES::Cast(*index_buffer);
if (!index_buffer_gles.BindAndUploadDataIfNecessary(
DeviceBufferGLES::BindingType::kElementArrayBuffer)) {

View File

@ -7,7 +7,6 @@
#include "flutter/fml/build_config.h"
#include "flutter/fml/logging.h"
#include "impeller/base/validation.h"
#include "impeller/core/buffer.h"
#include "impeller/renderer/backend/metal/device_buffer_mtl.h"
#include "impeller/renderer/backend/metal/formats_mtl.h"
#include "impeller/renderer/backend/metal/texture_mtl.h"

View File

@ -171,7 +171,7 @@ static bool Bind(ComputePassBindingsCache& pass,
return false;
}
auto device_buffer = view.buffer->GetDeviceBuffer();
auto device_buffer = view.buffer;
if (!device_buffer) {
return false;
}

View File

@ -13,8 +13,9 @@
namespace impeller {
class DeviceBufferMTL final : public DeviceBuffer,
public BackendCast<DeviceBufferMTL, Buffer> {
class DeviceBufferMTL final
: public DeviceBuffer,
public BackendCast<DeviceBufferMTL, DeviceBuffer> {
public:
DeviceBufferMTL();

View File

@ -364,7 +364,7 @@ static bool Bind(PassBindingsCache& pass,
return false;
}
auto device_buffer = view.buffer->GetDeviceBuffer();
auto device_buffer = view.buffer;
if (!device_buffer) {
return false;
}
@ -508,12 +508,7 @@ bool RenderPassMTL::EncodeCommands(const std::shared_ptr<Allocator>& allocator,
if (!index_buffer) {
return false;
}
auto device_buffer = index_buffer->GetDeviceBuffer();
if (!device_buffer) {
return false;
}
auto mtl_index_buffer =
DeviceBufferMTL::Cast(*device_buffer).GetMTLBuffer();
auto mtl_index_buffer = DeviceBufferMTL::Cast(*index_buffer).GetMTLBuffer();
if (!mtl_index_buffer) {
return false;
}

View File

@ -70,7 +70,7 @@ static bool BindBuffers(const Bindings& bindings,
for (const BufferAndUniformSlot& data : bindings.buffers) {
const auto& buffer_view = data.view.resource.buffer;
auto device_buffer = buffer_view->GetDeviceBuffer();
auto device_buffer = buffer_view;
if (!device_buffer) {
VALIDATION_LOG << "Failed to get device buffer for vertex binding";
return false;

View File

@ -54,11 +54,10 @@ TEST(BlitCommandVkTest, BlitCopyBufferToTextureCommandVK) {
.format = PixelFormat::kR8G8B8A8UNormInt,
.size = ISize(100, 100),
});
cmd.source = context->GetResourceAllocator()
->CreateBuffer({
.size = 1,
})
->AsBufferView();
cmd.source =
DeviceBuffer::AsBufferView(context->GetResourceAllocator()->CreateBuffer({
.size = 1,
}));
bool result = cmd.Encode(*encoder.get());
EXPECT_TRUE(result);
EXPECT_TRUE(encoder->IsTracking(cmd.source.buffer));

View File

@ -48,14 +48,14 @@ class TrackedObjectsVK {
tracked_objects_.insert(std::move(object));
}
void Track(std::shared_ptr<const Buffer> buffer) {
void Track(std::shared_ptr<const DeviceBuffer> buffer) {
if (!buffer) {
return;
}
tracked_buffers_.insert(std::move(buffer));
}
bool IsTracking(const std::shared_ptr<const Buffer>& buffer) const {
bool IsTracking(const std::shared_ptr<const DeviceBuffer>& buffer) const {
if (!buffer) {
return false;
}
@ -88,7 +88,7 @@ class TrackedObjectsVK {
std::shared_ptr<CommandPoolVK> pool_;
vk::UniqueCommandBuffer buffer_;
std::set<std::shared_ptr<SharedObjectVK>> tracked_objects_;
std::set<std::shared_ptr<const Buffer>> tracked_buffers_;
std::set<std::shared_ptr<const DeviceBuffer>> tracked_buffers_;
std::set<std::shared_ptr<const TextureSourceVK>> tracked_textures_;
std::unique_ptr<GPUProbe> probe_;
bool is_valid_ = false;
@ -250,7 +250,7 @@ bool CommandEncoderVK::Track(std::shared_ptr<SharedObjectVK> object) {
return true;
}
bool CommandEncoderVK::Track(std::shared_ptr<const Buffer> buffer) {
bool CommandEncoderVK::Track(std::shared_ptr<const DeviceBuffer> buffer) {
if (!IsValid()) {
return false;
}
@ -259,7 +259,7 @@ bool CommandEncoderVK::Track(std::shared_ptr<const Buffer> buffer) {
}
bool CommandEncoderVK::IsTracking(
const std::shared_ptr<const Buffer>& buffer) const {
const std::shared_ptr<const DeviceBuffer>& buffer) const {
if (!IsValid()) {
return false;
}

View File

@ -64,9 +64,9 @@ class CommandEncoderVK {
bool Track(std::shared_ptr<SharedObjectVK> object);
bool Track(std::shared_ptr<const Buffer> buffer);
bool Track(std::shared_ptr<const DeviceBuffer> buffer);
bool IsTracking(const std::shared_ptr<const Buffer>& texture) const;
bool IsTracking(const std::shared_ptr<const DeviceBuffer>& texture) const;
bool Track(const std::shared_ptr<const Texture>& texture);

View File

@ -15,7 +15,7 @@
namespace impeller {
class DeviceBufferVK final : public DeviceBuffer,
public BackendCast<DeviceBufferVK, Buffer> {
public BackendCast<DeviceBufferVK, DeviceBuffer> {
public:
DeviceBufferVK(DeviceBufferDescriptor desc,
std::weak_ptr<Context> context,

View File

@ -410,7 +410,7 @@ static bool EncodeCommand(const Context& context,
}
auto& allocator = *context.GetResourceAllocator();
auto vertex_buffer = vertex_buffer_view.buffer->GetDeviceBuffer();
auto vertex_buffer = vertex_buffer_view.buffer;
if (!vertex_buffer) {
VALIDATION_LOG << "Failed to acquire device buffer"
@ -435,7 +435,7 @@ static bool EncodeCommand(const Context& context,
return false;
}
auto index_buffer = index_buffer_view.buffer->GetDeviceBuffer();
auto index_buffer = index_buffer_view.buffer;
if (!index_buffer) {
VALIDATION_LOG << "Failed to acquire device buffer"
<< " for index buffer view";

View File

@ -71,10 +71,9 @@ TEST_P(ComputeSubgroupTest, PathPlayground) {
"VertexCount");
auto callback = [&](RenderPass& pass) -> bool {
::memset(vertex_buffer_count->AsBufferView().contents, 0,
::memset(vertex_buffer_count->OnGetContents(), 0,
sizeof(SS::VertexBufferCount));
::memset(vertex_buffer->AsBufferView().contents, 0,
sizeof(SS::VertexBuffer<2048>));
::memset(vertex_buffer->OnGetContents(), 0, sizeof(SS::VertexBuffer<2048>));
const auto* main_viewport = ImGui::GetMainViewport();
ImGui::SetNextWindowPos(
ImVec2(main_viewport->WorkPos.x + 650, main_viewport->WorkPos.y + 20));
@ -93,18 +92,18 @@ TEST_P(ComputeSubgroupTest, PathPlayground) {
auto status =
ComputeTessellator{}
.SetStrokeWidth(stroke_width)
.Tessellate(
path, *host_buffer, context, vertex_buffer->AsBufferView(),
vertex_buffer_count->AsBufferView(),
[vertex_buffer_count, &vertex_count,
&promise](CommandBuffer::Status status) {
vertex_count =
reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
->count;
promise.set_value(status ==
CommandBuffer::Status::kCompleted);
});
.Tessellate(path, *host_buffer, context,
DeviceBuffer::AsBufferView(vertex_buffer),
DeviceBuffer::AsBufferView(vertex_buffer_count),
[vertex_buffer_count, &vertex_count,
&promise](CommandBuffer::Status status) {
vertex_count =
reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->OnGetContents())
->count;
promise.set_value(
status == CommandBuffer::Status::kCompleted);
});
switch (status) {
case ComputeTessellator::Status::kCommandInvalid:
ImGui::Text("Failed to submit compute job (invalid command)");
@ -152,11 +151,11 @@ TEST_P(ComputeSubgroupTest, PathPlayground) {
cmd.pipeline = renderer.GetSolidFillPipeline(options);
auto count = reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
vertex_buffer_count->OnGetContents())
->count;
cmd.BindVertices(
VertexBuffer{.vertex_buffer = vertex_buffer->AsBufferView(),
VertexBuffer{.vertex_buffer = DeviceBuffer::AsBufferView(vertex_buffer),
.vertex_count = count,
.index_type = IndexType::kNone});
@ -313,10 +312,9 @@ TEST_P(ComputeSubgroupTest, LargePath) {
.TakePath();
auto callback = [&](RenderPass& pass) -> bool {
::memset(vertex_buffer_count->AsBufferView().contents, 0,
::memset(vertex_buffer_count->OnGetContents(), 0,
sizeof(SS::VertexBufferCount));
::memset(vertex_buffer->AsBufferView().contents, 0,
sizeof(SS::VertexBuffer<2048>));
::memset(vertex_buffer->OnGetContents(), 0, sizeof(SS::VertexBuffer<2048>));
ContentContext renderer(context, nullptr);
if (!renderer.IsValid()) {
@ -327,10 +325,11 @@ TEST_P(ComputeSubgroupTest, LargePath) {
.SetStrokeWidth(stroke_width)
.Tessellate(
complex_path, renderer.GetTransientsBuffer(), context,
vertex_buffer->AsBufferView(), vertex_buffer_count->AsBufferView(),
DeviceBuffer::AsBufferView(vertex_buffer),
DeviceBuffer::AsBufferView(vertex_buffer_count),
[vertex_buffer_count, &vertex_count](CommandBuffer::Status status) {
vertex_count = reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
vertex_buffer_count->OnGetContents())
->count;
});
@ -354,11 +353,11 @@ TEST_P(ComputeSubgroupTest, LargePath) {
cmd.pipeline = renderer.GetSolidFillPipeline(options);
auto count = reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
vertex_buffer_count->OnGetContents())
->count;
cmd.BindVertices(
VertexBuffer{.vertex_buffer = vertex_buffer->AsBufferView(),
VertexBuffer{.vertex_buffer = DeviceBuffer::AsBufferView(vertex_buffer),
.vertex_count = count,
.index_type = IndexType::kNone});
@ -402,8 +401,8 @@ TEST_P(ComputeSubgroupTest, QuadAndCubicInOnePath) {
auto host_buffer = HostBuffer::Create(context->GetResourceAllocator());
auto status = tessellator.Tessellate(
path, *host_buffer, context, vertex_buffer->AsBufferView(),
vertex_buffer_count->AsBufferView(),
path, *host_buffer, context, DeviceBuffer::AsBufferView(vertex_buffer),
DeviceBuffer::AsBufferView(vertex_buffer_count),
[&latch](CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
latch.Signal();
@ -437,11 +436,11 @@ TEST_P(ComputeSubgroupTest, QuadAndCubicInOnePath) {
cmd.pipeline = renderer.GetSolidFillPipeline(options);
auto count = reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
vertex_buffer_count->OnGetContents())
->count;
cmd.BindVertices(
VertexBuffer{.vertex_buffer = vertex_buffer->AsBufferView(),
VertexBuffer{.vertex_buffer = DeviceBuffer::AsBufferView(vertex_buffer),
.vertex_count = count,
.index_type = IndexType::kNone});
@ -468,11 +467,11 @@ TEST_P(ComputeSubgroupTest, QuadAndCubicInOnePath) {
latch.Wait();
auto vertex_count = reinterpret_cast<SS::VertexBufferCount*>(
vertex_buffer_count->AsBufferView().contents)
vertex_buffer_count->OnGetContents())
->count;
EXPECT_EQ(vertex_count, golden_cubic_and_quad_points.size());
auto vertex_buffer_data = reinterpret_cast<SS::VertexBuffer<2048>*>(
vertex_buffer->AsBufferView().contents);
auto vertex_buffer_data =
reinterpret_cast<SS::VertexBuffer<2048>*>(vertex_buffer->OnGetContents());
for (size_t i = 0; i < vertex_count; i++) {
EXPECT_LT(std::abs(golden_cubic_and_quad_points[i].x -
vertex_buffer_data->position[i].x),

View File

@ -133,7 +133,7 @@ ComputeTessellator::Status ComputeTessellator::Tessellate(
PS::BindQuads(cmd, host_buffer.EmplaceStorageBuffer(quads));
PS::BindLines(cmd, host_buffer.EmplaceStorageBuffer(lines));
PS::BindComponents(cmd, host_buffer.EmplaceStorageBuffer(components));
PS::BindPolyline(cmd, polyline_buffer->AsBufferView());
PS::BindPolyline(cmd, DeviceBuffer::AsBufferView(polyline_buffer));
if (!pass->AddCommand(std::move(cmd))) {
return Status::kCommandInvalid;
@ -164,7 +164,7 @@ ComputeTessellator::Status ComputeTessellator::Tessellate(
};
SS::BindConfig(cmd, host_buffer.EmplaceUniform(config));
SS::BindPolyline(cmd, polyline_buffer->AsBufferView());
SS::BindPolyline(cmd, DeviceBuffer::AsBufferView(polyline_buffer));
SS::BindVertexBufferCount(cmd, std::move(vertex_buffer_count));
SS::BindVertexBuffer(cmd, std::move(vertex_buffer));

View File

@ -79,33 +79,33 @@ TEST_P(ComputeTest, CanCreateComputePass) {
CS::BindInfo(cmd, host_buffer->EmplaceUniform(info));
CS::BindInput0(cmd, host_buffer->EmplaceStorageBuffer(input_0));
CS::BindInput1(cmd, host_buffer->EmplaceStorageBuffer(input_1));
CS::BindOutput(cmd, output_buffer->AsBufferView());
CS::BindOutput(cmd, DeviceBuffer::AsBufferView(output_buffer));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
ASSERT_TRUE(pass->EncodeCommands());
fml::AutoResetWaitableEvent latch;
ASSERT_TRUE(
cmd_buffer->SubmitCommands([&latch, output_buffer, &input_0,
&input_1](CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
ASSERT_TRUE(cmd_buffer->SubmitCommands([&latch, output_buffer, &input_0,
&input_1](
CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
auto view = output_buffer->AsBufferView();
EXPECT_EQ(view.range.length, sizeof(CS::Output<kCount>));
auto view = DeviceBuffer::AsBufferView(output_buffer);
EXPECT_EQ(view.range.length, sizeof(CS::Output<kCount>));
CS::Output<kCount>* output =
reinterpret_cast<CS::Output<kCount>*>(view.contents);
EXPECT_TRUE(output);
for (size_t i = 0; i < kCount; i++) {
Vector4 vector = output->elements[i];
Vector4 computed = input_0.elements[i] * input_1.elements[i];
EXPECT_EQ(vector, Vector4(computed.x + 2 + input_1.some_struct.i,
computed.y + 3 + input_1.some_struct.vf.x,
computed.z + 5 + input_1.some_struct.vf.y,
computed.w));
}
latch.Signal();
}));
CS::Output<kCount>* output =
reinterpret_cast<CS::Output<kCount>*>(output_buffer->OnGetContents());
EXPECT_TRUE(output);
for (size_t i = 0; i < kCount; i++) {
Vector4 vector = output->elements[i];
Vector4 computed = input_0.elements[i] * input_1.elements[i];
EXPECT_EQ(vector,
Vector4(computed.x + 2 + input_1.some_struct.i,
computed.y + 3 + input_1.some_struct.vf.x,
computed.z + 5 + input_1.some_struct.vf.y, computed.w));
}
latch.Signal();
}));
latch.Wait();
}
@ -147,30 +147,30 @@ TEST_P(ComputeTest, CanComputePrefixSum) {
context, "Output Buffer");
CS::BindInputData(cmd, host_buffer->EmplaceStorageBuffer(input_data));
CS::BindOutputData(cmd, output_buffer->AsBufferView());
CS::BindOutputData(cmd, DeviceBuffer::AsBufferView(output_buffer));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
ASSERT_TRUE(pass->EncodeCommands());
fml::AutoResetWaitableEvent latch;
ASSERT_TRUE(cmd_buffer->SubmitCommands(
[&latch, output_buffer](CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
ASSERT_TRUE(cmd_buffer->SubmitCommands([&latch, output_buffer](
CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
auto view = output_buffer->AsBufferView();
EXPECT_EQ(view.range.length, sizeof(CS::OutputData<kCount>));
auto view = DeviceBuffer::AsBufferView(output_buffer);
EXPECT_EQ(view.range.length, sizeof(CS::OutputData<kCount>));
CS::OutputData<kCount>* output =
reinterpret_cast<CS::OutputData<kCount>*>(view.contents);
EXPECT_TRUE(output);
CS::OutputData<kCount>* output = reinterpret_cast<CS::OutputData<kCount>*>(
output_buffer->OnGetContents());
EXPECT_TRUE(output);
constexpr uint32_t expected[kCount] = {1, 3, 6, 10, 15};
for (size_t i = 0; i < kCount; i++) {
auto computed_sum = output->data[i];
EXPECT_EQ(computed_sum, expected[i]);
}
latch.Signal();
}));
constexpr uint32_t expected[kCount] = {1, 3, 6, 10, 15};
for (size_t i = 0; i < kCount; i++) {
auto computed_sum = output->data[i];
EXPECT_EQ(computed_sum, expected[i]);
}
latch.Signal();
}));
latch.Wait();
}
@ -204,25 +204,25 @@ TEST_P(ComputeTest, 1DThreadgroupSizingIsCorrect) {
auto output_buffer = CreateHostVisibleDeviceBuffer<CS::OutputData<kCount>>(
context, "Output Buffer");
CS::BindOutputData(cmd, output_buffer->AsBufferView());
CS::BindOutputData(cmd, DeviceBuffer::AsBufferView(output_buffer));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
ASSERT_TRUE(pass->EncodeCommands());
fml::AutoResetWaitableEvent latch;
ASSERT_TRUE(cmd_buffer->SubmitCommands(
[&latch, output_buffer](CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
ASSERT_TRUE(cmd_buffer->SubmitCommands([&latch, output_buffer](
CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
auto view = output_buffer->AsBufferView();
EXPECT_EQ(view.range.length, sizeof(CS::OutputData<kCount>));
auto view = DeviceBuffer::AsBufferView(output_buffer);
EXPECT_EQ(view.range.length, sizeof(CS::OutputData<kCount>));
CS::OutputData<kCount>* output =
reinterpret_cast<CS::OutputData<kCount>*>(view.contents);
EXPECT_TRUE(output);
EXPECT_EQ(output->data[kCount - 1], kCount - 1);
latch.Signal();
}));
CS::OutputData<kCount>* output = reinterpret_cast<CS::OutputData<kCount>*>(
output_buffer->OnGetContents());
EXPECT_TRUE(output);
EXPECT_EQ(output->data[kCount - 1], kCount - 1);
latch.Signal();
}));
latch.Wait();
}
@ -263,7 +263,7 @@ TEST_P(ComputeTest, CanComputePrefixSumLargeInteractive) {
context, "Output Buffer");
CS::BindInputData(cmd, host_buffer->EmplaceStorageBuffer(input_data));
CS::BindOutputData(cmd, output_buffer->AsBufferView());
CS::BindOutputData(cmd, DeviceBuffer::AsBufferView(output_buffer));
pass->AddCommand(std::move(cmd));
pass->EncodeCommands();
@ -330,7 +330,7 @@ TEST_P(ComputeTest, MultiStageInputAndOutput) {
cmd.pipeline = compute_pipeline_1;
CS1::BindInput(cmd, host_buffer->EmplaceStorageBuffer(input_1));
CS1::BindOutput(cmd, output_buffer_1->AsBufferView());
CS1::BindOutput(cmd, DeviceBuffer::AsBufferView(output_buffer_1));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
}
@ -339,8 +339,8 @@ TEST_P(ComputeTest, MultiStageInputAndOutput) {
ComputeCommand cmd;
cmd.pipeline = compute_pipeline_2;
CS1::BindInput(cmd, output_buffer_1->AsBufferView());
CS2::BindOutput(cmd, output_buffer_2->AsBufferView());
CS1::BindInput(cmd, DeviceBuffer::AsBufferView(output_buffer_1));
CS2::BindOutput(cmd, DeviceBuffer::AsBufferView(output_buffer_2));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
}
@ -353,14 +353,14 @@ TEST_P(ComputeTest, MultiStageInputAndOutput) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
CS1::Output<kCount2>* output_1 = reinterpret_cast<CS1::Output<kCount2>*>(
output_buffer_1->AsBufferView().contents);
output_buffer_1->OnGetContents());
EXPECT_TRUE(output_1);
EXPECT_EQ(output_1->count, 10u);
EXPECT_THAT(output_1->elements,
::testing::ElementsAre(0, 0, 2, 3, 4, 6, 6, 9, 8, 12));
CS2::Output<kCount2>* output_2 = reinterpret_cast<CS2::Output<kCount2>*>(
output_buffer_2->AsBufferView().contents);
output_buffer_2->OnGetContents());
EXPECT_TRUE(output_2);
EXPECT_EQ(output_2->count, 10u);
EXPECT_THAT(output_2->elements,
@ -417,33 +417,33 @@ TEST_P(ComputeTest, CanCompute1DimensionalData) {
CS::BindInfo(cmd, host_buffer->EmplaceUniform(info));
CS::BindInput0(cmd, host_buffer->EmplaceStorageBuffer(input_0));
CS::BindInput1(cmd, host_buffer->EmplaceStorageBuffer(input_1));
CS::BindOutput(cmd, output_buffer->AsBufferView());
CS::BindOutput(cmd, DeviceBuffer::AsBufferView(output_buffer));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
ASSERT_TRUE(pass->EncodeCommands());
fml::AutoResetWaitableEvent latch;
ASSERT_TRUE(
cmd_buffer->SubmitCommands([&latch, output_buffer, &input_0,
&input_1](CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
ASSERT_TRUE(cmd_buffer->SubmitCommands([&latch, output_buffer, &input_0,
&input_1](
CommandBuffer::Status status) {
EXPECT_EQ(status, CommandBuffer::Status::kCompleted);
auto view = output_buffer->AsBufferView();
EXPECT_EQ(view.range.length, sizeof(CS::Output<kCount>));
auto view = DeviceBuffer::AsBufferView(output_buffer);
EXPECT_EQ(view.range.length, sizeof(CS::Output<kCount>));
CS::Output<kCount>* output =
reinterpret_cast<CS::Output<kCount>*>(view.contents);
EXPECT_TRUE(output);
for (size_t i = 0; i < kCount; i++) {
Vector4 vector = output->elements[i];
Vector4 computed = input_0.elements[i] * input_1.elements[i];
EXPECT_EQ(vector, Vector4(computed.x + 2 + input_1.some_struct.i,
computed.y + 3 + input_1.some_struct.vf.x,
computed.z + 5 + input_1.some_struct.vf.y,
computed.w));
}
latch.Signal();
}));
CS::Output<kCount>* output =
reinterpret_cast<CS::Output<kCount>*>(output_buffer->OnGetContents());
EXPECT_TRUE(output);
for (size_t i = 0; i < kCount; i++) {
Vector4 vector = output->elements[i];
Vector4 computed = input_0.elements[i] * input_1.elements[i];
EXPECT_EQ(vector,
Vector4(computed.x + 2 + input_1.some_struct.i,
computed.y + 3 + input_1.some_struct.vf.x,
computed.z + 5 + input_1.some_struct.vf.y, computed.w));
}
latch.Signal();
}));
latch.Wait();
}
@ -496,7 +496,7 @@ TEST_P(ComputeTest, ReturnsEarlyWhenAnyGridDimensionIsZero) {
CS::BindInfo(cmd, host_buffer->EmplaceUniform(info));
CS::BindInput0(cmd, host_buffer->EmplaceStorageBuffer(input_0));
CS::BindInput1(cmd, host_buffer->EmplaceStorageBuffer(input_1));
CS::BindOutput(cmd, output_buffer->AsBufferView());
CS::BindOutput(cmd, DeviceBuffer::AsBufferView(output_buffer));
ASSERT_TRUE(pass->AddCommand(std::move(cmd)));
ASSERT_FALSE(pass->EncodeCommands());

View File

@ -688,10 +688,10 @@ TEST_P(RendererTest, CanBlitTextureToBuffer) {
FS::BindFragInfo(cmd, host_buffer->EmplaceUniform(frag_info));
auto sampler = context->GetSamplerLibrary()->GetSampler({});
auto buffer_view = device_buffer->AsBufferView();
auto buffer_view = DeviceBuffer::AsBufferView(device_buffer);
auto texture =
context->GetResourceAllocator()->CreateTexture(texture_desc);
if (!texture->SetContents(buffer_view.contents,
if (!texture->SetContents(device_buffer->OnGetContents(),
buffer_view.range.length)) {
VALIDATION_LOG << "Could not upload texture to device memory";
return false;

View File

@ -127,7 +127,7 @@ class VertexBufferBuilder {
if (!label_.empty()) {
buffer->SetLabel(SPrintF("%s Vertices", label_.c_str()));
}
return buffer->AsBufferView();
return DeviceBuffer::AsBufferView(buffer);
}
std::vector<IndexType> CreateIndexBuffer() const { return indices_; }
@ -156,7 +156,7 @@ class VertexBufferBuilder {
if (!label_.empty()) {
buffer->SetLabel(SPrintF("%s Indices", label_.c_str()));
}
return buffer->AsBufferView();
return DeviceBuffer::AsBufferView(buffer);
}
};

View File

@ -333,7 +333,8 @@ static std::pair<sk_sp<DlImage>, std::string> UnsafeUploadTextureToPrivate(
return std::make_pair(nullptr, decode_error);
}
blit_pass->SetLabel("Mipmap Blit Pass");
blit_pass->AddCopy(buffer->AsBufferView(), dest_texture);
blit_pass->AddCopy(impeller::DeviceBuffer::AsBufferView(buffer),
dest_texture);
if (texture_descriptor.size.MipCount() > 1) {
blit_pass->GenerateMipmap(dest_texture);
}

View File

@ -34,7 +34,7 @@ sk_sp<SkImage> ConvertBufferToSkImage(
const std::shared_ptr<impeller::DeviceBuffer>& buffer,
SkColorType color_type,
SkISize dimensions) {
auto buffer_view = buffer->AsBufferView();
auto buffer_view = impeller::DeviceBuffer::AsBufferView(buffer);
SkImageInfo image_info = SkImageInfo::Make(dimensions, color_type,
SkAlphaType::kPremul_SkAlphaType);
@ -47,7 +47,7 @@ sk_sp<SkImage> ConvertBufferToSkImage(
delete buffer;
};
auto bytes_per_pixel = image_info.bytesPerPixel();
bitmap.installPixels(image_info, buffer_view.contents,
bitmap.installPixels(image_info, buffer->OnGetContents(),
dimensions.width() * bytes_per_pixel, func,
new std::shared_ptr<impeller::DeviceBuffer>(buffer));
bitmap.setImmutable();