Merge pull request #2411 from FernandoS27/unsafe-gpu
GPU Manager: Implement ReadBlockUnsafe and WriteBlockUnsafe
Commit: 68b707711a
5 changed files with 99 additions and 15 deletions
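
For orientation before the diff: a minimal sketch (not part of this commit) of how a caller is expected to choose between the safe and unsafe variants. The helper names and the include path comment are illustrative assumptions; the only API relied on is the Tegra::MemoryManager interface declared in the header hunk further below.

    #include "video_core/memory_manager.h"  // assumed include path for Tegra::MemoryManager

    // Safe path: ReadBlock performs the host GPU memory flush/invalidate bookkeeping,
    // so it stays correct when the range may be backed by data the host GPU modified.
    void ReadPossiblyDirtyBlock(Tegra::MemoryManager& memory_manager, GPUVAddr addr,
                                u8* dest, std::size_t size) {
        memory_manager.ReadBlock(addr, dest, size);
    }

    // Unsafe path: ReadBlockUnsafe is a plain per-page copy with no flush or invalidate,
    // intended for memory that a cache already tracks or that is never resident on the
    // host GPU (per the new header comments in this PR).
    void ReadCacheManagedBlock(Tegra::MemoryManager& memory_manager, GPUVAddr addr,
                               u8* dest, std::size_t size) {
        memory_manager.ReadBlockUnsafe(addr, dest, size);
    }
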
@@ -57,8 +57,8 @@ bool DmaPusher::Step() {
     // Push buffer non-empty, read a word
     command_headers.resize(command_list_header.size);
-    gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(),
-                                  command_list_header.size * sizeof(u32));
+    gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
+                                        command_list_header.size * sizeof(u32));
 
     for (const CommandHeader& command_header : command_headers) {
@@ -418,7 +418,7 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
     const GPUVAddr tic_address_gpu{regs.tic.TICAddress() + tic_index * sizeof(Texture::TICEntry)};
 
     Texture::TICEntry tic_entry;
-    memory_manager.ReadBlock(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
+    memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
 
     ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
                    tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
@@ -439,7 +439,7 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
     const GPUVAddr tsc_address_gpu{regs.tsc.TSCAddress() + tsc_index * sizeof(Texture::TSCEntry)};
 
     Texture::TSCEntry tsc_entry;
-    memory_manager.ReadBlock(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
+    memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
     return tsc_entry;
 }
 
@@ -199,7 +199,15 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
     return {};
 }
 
-void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const {
+bool MemoryManager::IsBlockContinous(const GPUVAddr start, const std::size_t size) {
+    const GPUVAddr end = start + size;
+    const auto host_ptr_start = reinterpret_cast<std::uintptr_t>(GetPointer(start));
+    const auto host_ptr_end = reinterpret_cast<std::uintptr_t>(GetPointer(end));
+    const std::size_t range = static_cast<std::size_t>(host_ptr_end - host_ptr_start);
+    return range == size;
+}
+
+void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const {
     std::size_t remaining_size{size};
     std::size_t page_index{src_addr >> page_bits};
     std::size_t page_offset{src_addr & page_mask};
@@ -226,7 +234,30 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t
     }
 }
 
-void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size) {
+void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer,
+                                    const std::size_t size) const {
+    std::size_t remaining_size{size};
+    std::size_t page_index{src_addr >> page_bits};
+    std::size_t page_offset{src_addr & page_mask};
+
+    while (remaining_size > 0) {
+        const std::size_t copy_amount{
+            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
+        const u8* page_pointer = page_table.pointers[page_index];
+        if (page_pointer) {
+            const u8* src_ptr{page_pointer + page_offset};
+            std::memcpy(dest_buffer, src_ptr, copy_amount);
+        } else {
+            std::memset(dest_buffer, 0, copy_amount);
+        }
+        page_index++;
+        page_offset = 0;
+        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+        remaining_size -= copy_amount;
+    }
+}
+
+void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size) {
     std::size_t remaining_size{size};
     std::size_t page_index{dest_addr >> page_bits};
     std::size_t page_offset{dest_addr & page_mask};
@@ -253,7 +284,28 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::
     }
 }
 
-void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size) {
+void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
+                                     const std::size_t size) {
+    std::size_t remaining_size{size};
+    std::size_t page_index{dest_addr >> page_bits};
+    std::size_t page_offset{dest_addr & page_mask};
+
+    while (remaining_size > 0) {
+        const std::size_t copy_amount{
+            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
+        u8* page_pointer = page_table.pointers[page_index];
+        if (page_pointer) {
+            u8* dest_ptr{page_pointer + page_offset};
+            std::memcpy(dest_ptr, src_buffer, copy_amount);
+        }
+        page_index++;
+        page_offset = 0;
+        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+        remaining_size -= copy_amount;
+    }
+}
+
+void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
     std::size_t remaining_size{size};
     std::size_t page_index{src_addr >> page_bits};
     std::size_t page_offset{src_addr & page_mask};
@@ -281,6 +333,12 @@ void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t
     }
 }
 
+void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
+    std::vector<u8> tmp_buffer(size);
+    ReadBlockUnsafe(src_addr, tmp_buffer.data(), size);
+    WriteBlockUnsafe(dest_addr, tmp_buffer.data(), size);
+}
+
 void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
                              VAddr backing_addr) {
     LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
@@ -65,9 +65,32 @@ public:
     u8* GetPointer(GPUVAddr addr);
     const u8* GetPointer(GPUVAddr addr) const;
 
-    void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
-    void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
-    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
+    // Returns true if the block is continous in host memory, false otherwise
+    bool IsBlockContinous(const GPUVAddr start, const std::size_t size);
+
+    /**
+     * ReadBlock and WriteBlock are full read and write operations over virtual
+     * GPU Memory. It's important to use these when GPU memory may not be continous
+     * in the Host Memory counterpart. Note: This functions cause Host GPU Memory
+     * Flushes and Invalidations, respectively to each operation.
+     */
+    void ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const;
+    void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size);
+    void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size);
+
+    /**
+     * ReadBlockUnsafe and WriteBlockUnsafe are special versions of ReadBlock and
+     * WriteBlock respectively. In this versions, no flushing or invalidation is actually
+     * done and their performance is similar to a memcpy. This functions can be used
+     * on either of this 2 scenarios instead of their safe counterpart:
+     * - Memory which is sure to never be represented in the Host GPU.
+     * - Memory Managed by a Cache Manager. Example: Texture Flushing should use
+     *   WriteBlockUnsafe instead of WriteBlock since it shouldn't invalidate the texture
+     *   being flushed.
+     */
+    void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const;
+    void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size);
+    void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size);
 
 private:
     using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
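
To make the second scenario in the header comment above concrete, here is a hedged sketch of a cache flushing its own copy of a texture back to guest GPU memory. The FlushSurface helper, the staging_data buffer, and the include path comment are hypothetical; the only API taken from this PR is WriteBlockUnsafe as declared above.

    #include <vector>
    #include "video_core/memory_manager.h"  // assumed include path for Tegra::MemoryManager

    // Illustrative cache-side flush: write back data the cache already owns and tracks.
    // WriteBlockUnsafe avoids invalidating the very texture that is being flushed,
    // which is why the new header comment recommends it over WriteBlock here.
    void FlushSurface(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
                      const std::vector<u8>& staging_data) {
        memory_manager.WriteBlockUnsafe(gpu_addr, staging_data.data(), staging_data.size());
    }
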
@@ -38,13 +38,15 @@ GPUVAddr GetShaderAddress(Maxwell::ShaderProgram program) {
 }
 
 /// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(const u8* host_ptr) {
+ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
+                          const u8* host_ptr) {
     ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
     ASSERT_OR_EXECUTE(host_ptr != nullptr, {
         std::fill(program_code.begin(), program_code.end(), 0);
         return program_code;
     });
-    std::memcpy(program_code.data(), host_ptr, program_code.size() * sizeof(u64));
+    memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
+                                   program_code.size() * sizeof(u64));
     return program_code;
 }
 
@@ -497,11 +499,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
 
     if (!shader) {
         // No shader found - create a new one
-        ProgramCode program_code{GetShaderCode(host_ptr)};
+        ProgramCode program_code{GetShaderCode(memory_manager, program_addr, host_ptr)};
         ProgramCode program_code_b;
         if (program == Maxwell::ShaderProgram::VertexA) {
-            program_code_b = GetShaderCode(
-                memory_manager.GetPointer(GetShaderAddress(Maxwell::ShaderProgram::VertexB)));
+            const GPUVAddr program_addr_b{GetShaderAddress(Maxwell::ShaderProgram::VertexB)};
+            program_code_b = GetShaderCode(memory_manager, program_addr_b,
+                                           memory_manager.GetPointer(program_addr_b));
         }
         const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b);
         const VAddr cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)};