// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/algorithm.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/settings.h"
#include "core/core.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"

MICROPROFILE_DECLARE(GPU_DMAEngine);
MICROPROFILE_DEFINE(GPU_DMAEngine, "GPU", "DMA Engine", MP_RGB(224, 224, 128));

namespace Tegra::Engines {

using namespace Texture;

MaxwellDMA::MaxwellDMA(Core::System& system_, MemoryManager& memory_manager_)
    : system{system_}, memory_manager{memory_manager_} {}

MaxwellDMA::~MaxwellDMA() = default;

void MaxwellDMA::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
    rasterizer = rasterizer_;
}

void MaxwellDMA::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
    ASSERT_MSG(method < NUM_REGS, "Invalid MaxwellDMA register");

    regs.reg_array[method] = method_argument;
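
    // A write to the launch_dma register triggers the transfer described by the registers
    // written before it.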
    if (method == offsetof(Regs, launch_dma) / sizeof(u32)) {
        Launch();
    }
}

void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                 u32 methods_pending) {
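    // Replay the register writes one at a time, flagging the final write of the batch.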
    for (size_t i = 0; i < amount; ++i) {
        CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
    }
}

void MaxwellDMA::Launch() {
    MICROPROFILE_SCOPE(GPU_DMAEngine);
    LOG_TRACE(Render_OpenGL, "DMA copy 0x{:x} -> 0x{:x}", static_cast<GPUVAddr>(regs.offset_in),
              static_cast<GPUVAddr>(regs.offset_out));

    // TODO(Subv): Perform more research and implement all features of this engine.
    const LaunchDMA& launch = regs.launch_dma;
    ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
    ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
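
    // Pick the copy routine from the source/destination memory layouts: pitch->pitch is a plain
    // linear copy, while mixed layouts need the block-linear side (de)swizzled.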
    const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
    const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;

    if (!is_src_pitch && !is_dst_pitch) {
        // Copies where both the source and the destination are in block layout are not
        // implemented yet; report it and skip the transfer.
        UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
        return;
    }

    if (is_src_pitch && is_dst_pitch) {
        CopyPitchToPitch();
    } else {
        ASSERT(launch.multi_line_enable == 1);

        if (!is_src_pitch && is_dst_pitch) {
            CopyBlockLinearToPitch();
        } else {
            CopyPitchToBlockLinear();
        }
    }
    ReleaseSemaphore();
}

void MaxwellDMA::CopyPitchToPitch() {
    // When the `multi_line_enable` bit is enabled we copy a 2D image of dimensions
    // (line_length_in, line_count).
    // Otherwise the copy is performed as if we were copying a 1D buffer of length line_length_in.
    const bool remap_enabled = regs.launch_dma.remap_enable != 0;
    if (regs.launch_dma.multi_line_enable) {
        UNIMPLEMENTED_IF(remap_enabled);

        // Perform a line-by-line copy.
        // We're going to take a subrect of size (line_length_in, line_count) from the source
        // rectangle. There is no need to manually flush/invalidate the regions because CopyBlock
        // does that for us.
        for (u32 line = 0; line < regs.line_count; ++line) {
            const GPUVAddr source_line = regs.offset_in + static_cast<size_t>(line) * regs.pitch_in;
            const GPUVAddr dest_line = regs.offset_out + static_cast<size_t>(line) * regs.pitch_out;
            memory_manager.CopyBlock(dest_line, source_line, regs.line_length_in);
        }
        return;
    }
    // TODO: allow multisized components.
    auto& accelerate = rasterizer->AccessAccelerateDMA();
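    // With remapping enabled and the destination X component sourced from constant A, the copy
    // behaves as a fill of remap_consta_value (the assert below pins the component size to 4
    // bytes).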
    const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A;
    const bool is_buffer_clear = remap_enabled && is_const_a_dst;
    if (is_buffer_clear) {
        ASSERT(regs.remap_const.component_size_minus_one == 3);
        accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value);
        std::vector<u32> tmp_buffer(regs.line_length_in, regs.remap_consta_value);
        memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(tmp_buffer.data()),
                                        regs.line_length_in * sizeof(u32));
        return;
    }
    UNIMPLEMENTED_IF(remap_enabled);
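    // Try to service the copy on the host GPU first; fall back to a round trip through guest
    // memory when the rasterizer cannot accelerate it.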
    if (!accelerate.BufferCopy(regs.offset_in, regs.offset_out, regs.line_length_in)) {
        std::vector<u8> tmp_buffer(regs.line_length_in);
        memory_manager.ReadBlockUnsafe(regs.offset_in, tmp_buffer.data(), regs.line_length_in);
        memory_manager.WriteBlock(regs.offset_out, tmp_buffer.data(), regs.line_length_in);
    }
}

void MaxwellDMA::CopyBlockLinearToPitch() {
    UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
    UNIMPLEMENTED_IF(regs.src_params.layer != 0);

    const bool is_remapping = regs.launch_dma.remap_enable != 0;

    // Optimized path for micro copies.
    const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
    if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
        regs.src_params.height > GOB_SIZE_Y) {
        FastCopyBlockLinearToPitch();
        return;
    }

    // Deswizzle the input and copy it over.
    const Parameters& src_params = regs.src_params;

    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;

    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
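
    // Without remapping the copy is byte-addressed. Shift out the largest power of two (at most
    // 16) that divides all of the horizontal parameters and the source address, so the deswizzle
    // can operate on wider elements; e.g. if everything is divisible by 4, treat the copy as
    // 4-byte texels.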
    u32 width = src_params.width;
    u32 x_elements = regs.line_length_in;
    u32 x_offset = src_params.origin.x;
    u32 bpp_shift = 0U;
    if (!is_remapping) {
        bpp_shift = Common::FoldRight(
            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
            width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
        width >>= bpp_shift;
        x_elements >>= bpp_shift;
        x_offset >>= bpp_shift;
    }

    const u32 bytes_per_pixel = base_bpp << bpp_shift;
    const u32 height = src_params.height;
    const u32 depth = src_params.depth;
    const u32 block_height = src_params.block_size.height;
    const u32 block_depth = src_params.block_size.depth;
    const size_t src_size =
        CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);

    if (read_buffer.size() < src_size) {
        read_buffer.resize(src_size);
    }
    if (write_buffer.size() < dst_size) {
        write_buffer.resize(dst_size);
    }

    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);

    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
                     src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
                     regs.pitch_out);

    memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}

void MaxwellDMA::CopyPitchToBlockLinear() {
    UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
    UNIMPLEMENTED_IF(regs.dst_params.layer != 0);

    const bool is_remapping = regs.launch_dma.remap_enable != 0;
    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;

    const auto& dst_params = regs.dst_params;

    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
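
    // Same element-widening trick as in CopyBlockLinearToPitch, keyed off the destination
    // parameters and the destination address.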
    u32 width = dst_params.width;
    u32 x_elements = regs.line_length_in;
    u32 x_offset = dst_params.origin.x;
    u32 bpp_shift = 0U;
    if (!is_remapping) {
        bpp_shift = Common::FoldRight(
            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
            width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
        width >>= bpp_shift;
        x_elements >>= bpp_shift;
        x_offset >>= bpp_shift;
    }

    const u32 bytes_per_pixel = base_bpp << bpp_shift;
    const u32 height = dst_params.height;
    const u32 depth = dst_params.depth;
    const u32 block_height = dst_params.block_size.height;
    const u32 block_depth = dst_params.block_size.depth;
    const size_t dst_size =
        CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
    const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;

    if (read_buffer.size() < src_size) {
        read_buffer.resize(src_size);
    }
    if (write_buffer.size() < dst_size) {
        write_buffer.resize(dst_size);
    }

    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
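    // Read the destination first so texels outside the copied subrect keep their current
    // contents; only pay for a flush-aware read when GPU accuracy is set to Extreme.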
    if (Settings::IsGPULevelExtreme()) {
        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
    } else {
        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
    }

    // If the input is linear and the output is tiled, swizzle the input and copy it over.
    SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
                   dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
                   regs.pitch_in);

    memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}

void MaxwellDMA::FastCopyBlockLinearToPitch() {
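    // Micro copy: only the 64x8-byte GOB containing the source origin is read and deswizzled
    // into the pitch-linear destination.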
    const u32 bytes_per_pixel = 1U;
    const size_t src_size = GOB_SIZE;
    const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
    u32 pos_x = regs.src_params.origin.x;
    u32 pos_y = regs.src_params.origin.y;
    const u64 offset = GetGOBOffset(regs.src_params.width, regs.src_params.height, pos_x, pos_y,
                                    regs.src_params.block_size.height, bytes_per_pixel);
    const u32 x_in_gob = 64 / bytes_per_pixel;
    pos_x = pos_x % x_in_gob;
    pos_y = pos_y % 8;

    if (read_buffer.size() < src_size) {
        read_buffer.resize(src_size);
    }
    if (write_buffer.size() < dst_size) {
        write_buffer.resize(dst_size);
    }

    if (Settings::IsGPULevelExtreme()) {
        memory_manager.ReadBlock(regs.offset_in + offset, read_buffer.data(), src_size);
        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
    } else {
        memory_manager.ReadBlockUnsafe(regs.offset_in + offset, read_buffer.data(), src_size);
        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
    }

    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
                     regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
                     regs.src_params.block_size.height, regs.src_params.block_size.depth,
                     regs.pitch_out);

    memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
}

void MaxwellDMA::ReleaseSemaphore() {
    const auto type = regs.launch_dma.semaphore_type;
    const GPUVAddr address = regs.semaphore.address;
    switch (type) {
    case LaunchDMA::SemaphoreType::NONE:
        break;
    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
        memory_manager.Write<u32>(address, regs.semaphore.payload);
        break;
    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
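        // A four-word release writes a 16-byte record: the payload followed by the GPU timestamp.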
        memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
        memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
        break;
    default:
        ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
    }
}

} // namespace Tegra::Engines