Merge pull request #2042 from ReinUsesLisp/nouveau-tex
maxwell_3d: Allow texture handles with TIC id zero
Commit 10ab714fe0
11 changed files with 82 additions and 79 deletions
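Most of the hunks below apply a single pattern: instead of dereferencing the std::optional<VAddr> returned by MemoryManager::GpuToCpuAddress() inline, the result is stored in a local, checked with ASSERT_MSG, and only then dereferenced. The following is a minimal, self-contained sketch of that pattern; the optional-returning GpuToCpuAddress signature is taken from the diff, while the toy page table, Map() and main() are purely illustrative.

// Minimal sketch of the checked-address pattern applied throughout this change.
// GpuToCpuAddress returning std::optional<VAddr> is taken from the diff; the
// toy page table, Map() and main() below are invented for illustration.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_map>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

class MemoryManager {
public:
    void Map(GPUVAddr gpu, VAddr cpu) {
        pages[gpu] = cpu;
    }

    // An empty optional models an unmapped (or, after this change, reserved) page.
    std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu) const {
        const auto it = pages.find(gpu);
        if (it == pages.end()) {
            return std::nullopt;
        }
        return it->second;
    }

private:
    std::unordered_map<GPUVAddr, VAddr> pages;
};

int main() {
    MemoryManager memory_manager;
    memory_manager.Map(0x1000, 0x2000);

    // Old style (removed by the diff): dereference the optional inline, e.g.
    //   Memory::Read32(*memory_manager.GpuToCpuAddress(addr));
    // New style (added by the diff): resolve, assert, then dereference.
    const auto address = memory_manager.GpuToCpuAddress(0x1000);
    assert(address && "Invalid GPU address");
    std::cout << "CPU address: 0x" << std::hex << *address << '\n';
    return 0;
}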
@@ -35,8 +35,10 @@ void DmaPusher::DispatchCalls() {
 bool DmaPusher::Step() {
     if (dma_get != dma_put) {
         // Push buffer non-empty, read a word
-        const CommandHeader command_header{
-            Memory::Read32(*gpu.MemoryManager().GpuToCpuAddress(dma_get))};
+        const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
+        ASSERT_MSG(address, "Invalid GPU address");
+
+        const CommandHeader command_header{Memory::Read32(*address)};

         dma_get += sizeof(u32);

@@ -42,8 +42,10 @@ void Fermi2D::HandleSurfaceCopy() {
     // TODO(Subv): Only raw copies are implemented.
     ASSERT(regs.operation == Regs::Operation::SrcCopy);

-    const VAddr source_cpu = *memory_manager.GpuToCpuAddress(source);
-    const VAddr dest_cpu = *memory_manager.GpuToCpuAddress(dest);
+    const auto source_cpu = memory_manager.GpuToCpuAddress(source);
+    const auto dest_cpu = memory_manager.GpuToCpuAddress(dest);
+    ASSERT_MSG(source_cpu, "Invalid source GPU address");
+    ASSERT_MSG(dest_cpu, "Invalid destination GPU address");

     u32 src_bytes_per_pixel = RenderTargetBytesPerPixel(regs.src.format);
     u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format);

@@ -52,22 +54,22 @@ void Fermi2D::HandleSurfaceCopy() {
     // All copies here update the main memory, so mark all rasterizer states as invalid.
     Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();

-    rasterizer.FlushRegion(source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
+    rasterizer.FlushRegion(*source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
     // We have to invalidate the destination region to evict any outdated surfaces from the
     // cache. We do this before actually writing the new data because the destination address
     // might contain a dirty surface that will have to be written back to memory.
-    rasterizer.InvalidateRegion(dest_cpu,
+    rasterizer.InvalidateRegion(*dest_cpu,
                                 dst_bytes_per_pixel * regs.dst.width * regs.dst.height);

     if (regs.src.linear == regs.dst.linear) {
         // If the input layout and the output layout are the same, just perform a raw copy.
         ASSERT(regs.src.BlockHeight() == regs.dst.BlockHeight());
-        Memory::CopyBlock(dest_cpu, source_cpu,
+        Memory::CopyBlock(*dest_cpu, *source_cpu,
                           src_bytes_per_pixel * regs.dst.width * regs.dst.height);
         return;
     }
-    u8* src_buffer = Memory::GetPointer(source_cpu);
-    u8* dst_buffer = Memory::GetPointer(dest_cpu);
+    u8* src_buffer = Memory::GetPointer(*source_cpu);
+    u8* dst_buffer = Memory::GetPointer(*dest_cpu);
     if (!regs.src.linear && regs.dst.linear) {
         // If the input is tiled and the output is linear, deswizzle the input and copy it over.
         Texture::CopySwizzledData(regs.src.width, regs.src.height, regs.src.depth,

@@ -39,16 +39,17 @@ void KeplerMemory::ProcessData(u32 data) {
     ASSERT_MSG(regs.exec.linear, "Non-linear uploads are not supported");
     ASSERT(regs.dest.x == 0 && regs.dest.y == 0 && regs.dest.z == 0);

-    GPUVAddr address = regs.dest.Address();
-    VAddr dest_address =
-        *memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
+    const GPUVAddr address = regs.dest.Address();
+    const auto dest_address =
+        memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
+    ASSERT_MSG(dest_address, "Invalid GPU address");

     // We have to invalidate the destination region to evict any outdated surfaces from the cache.
     // We do this before actually writing the new data because the destination address might contain
     // a dirty surface that will have to be written back to memory.
-    rasterizer.InvalidateRegion(dest_address, sizeof(u32));
+    rasterizer.InvalidateRegion(*dest_address, sizeof(u32));

-    Memory::Write32(dest_address, data);
+    Memory::Write32(*dest_address, data);
     Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();

     state.write_offset++;

@@ -273,7 +273,8 @@ void Maxwell3D::ProcessQueryGet() {
     GPUVAddr sequence_address = regs.query.QueryAddress();
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
     // VAddr before writing.
-    std::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
+    const auto address = memory_manager.GpuToCpuAddress(sequence_address);
+    ASSERT_MSG(address, "Invalid GPU address");

     // TODO(Subv): Support the other query units.
     ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,

@@ -386,14 +387,14 @@ void Maxwell3D::ProcessCBBind(Regs::ShaderStage stage) {

 void Maxwell3D::ProcessCBData(u32 value) {
     // Write the input value to the current const buffer at the current position.
-    GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
+    const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
     ASSERT(buffer_address != 0);

     // Don't allow writing past the end of the buffer.
     ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);

-    std::optional<VAddr> address =
-        memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
+    const auto address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
+    ASSERT_MSG(address, "Invalid GPU address");

     Memory::Write32(*address, value);
     dirty_flags.OnMemoryWrite();

@@ -403,10 +404,11 @@ void Maxwell3D::ProcessCBData(u32 value) {
 }

 Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
-    GPUVAddr tic_base_address = regs.tic.TICAddress();
+    const GPUVAddr tic_base_address = regs.tic.TICAddress();

-    GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-    std::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
+    const GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
+    const auto tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
+    ASSERT_MSG(tic_address_cpu, "Invalid GPU address");

     Texture::TICEntry tic_entry;
     Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));

@@ -415,10 +417,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
                    tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
                "TIC versions other than BlockLinear or Pitch are unimplemented");

-    auto r_type = tic_entry.r_type.Value();
-    auto g_type = tic_entry.g_type.Value();
-    auto b_type = tic_entry.b_type.Value();
-    auto a_type = tic_entry.a_type.Value();
+    const auto r_type = tic_entry.r_type.Value();
+    const auto g_type = tic_entry.g_type.Value();
+    const auto b_type = tic_entry.b_type.Value();
+    const auto a_type = tic_entry.a_type.Value();

     // TODO(Subv): Different data types for separate components are not supported
     ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);

@@ -427,10 +429,11 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
 }

 Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
-    GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
+    const GPUVAddr tsc_base_address = regs.tsc.TSCAddress();

-    GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-    std::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
+    const GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
+    const auto tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
+    ASSERT_MSG(tsc_address_cpu, "Invalid GPU address");

     Texture::TSCEntry tsc_entry;
     Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));

@@ -452,8 +455,10 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
     for (GPUVAddr current_texture = tex_info_buffer.address + TextureInfoOffset;
          current_texture < tex_info_buffer_end; current_texture += sizeof(Texture::TextureHandle)) {

-        Texture::TextureHandle tex_handle{
-            Memory::Read32(*memory_manager.GpuToCpuAddress(current_texture))};
+        const auto address = memory_manager.GpuToCpuAddress(current_texture);
+        ASSERT_MSG(address, "Invalid GPU address");
+
+        const Texture::TextureHandle tex_handle{Memory::Read32(*address)};

         Texture::FullTextureInfo tex_info{};
         // TODO(Subv): Use the shader to determine which textures are actually accessed.

@@ -462,22 +467,15 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
                               sizeof(Texture::TextureHandle);

         // Load the TIC data.
-        if (tex_handle.tic_id != 0) {
-            tex_info.enabled = true;
-
-            auto tic_entry = GetTICEntry(tex_handle.tic_id);
-            // TODO(Subv): Workaround for BitField's move constructor being deleted.
-            std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
-        }
+        auto tic_entry = GetTICEntry(tex_handle.tic_id);
+        // TODO(Subv): Workaround for BitField's move constructor being deleted.
+        std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));

         // Load the TSC data
-        if (tex_handle.tsc_id != 0) {
-            auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
-            // TODO(Subv): Workaround for BitField's move constructor being deleted.
-            std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
-        }
+        auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
+        // TODO(Subv): Workaround for BitField's move constructor being deleted.
+        std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));

-        if (tex_info.enabled)
-            textures.push_back(tex_info);
+        textures.push_back(tex_info);
     }

@@ -490,31 +488,28 @@ Texture::FullTextureInfo Maxwell3D::GetStageTexture(Regs::ShaderStage stage,
     auto& tex_info_buffer = shader.const_buffers[regs.tex_cb_index];
     ASSERT(tex_info_buffer.enabled && tex_info_buffer.address != 0);

-    GPUVAddr tex_info_address = tex_info_buffer.address + offset * sizeof(Texture::TextureHandle);
+    const GPUVAddr tex_info_address =
+        tex_info_buffer.address + offset * sizeof(Texture::TextureHandle);

     ASSERT(tex_info_address < tex_info_buffer.address + tex_info_buffer.size);

-    std::optional<VAddr> tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
-    Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)};
+    const auto tex_address_cpu = memory_manager.GpuToCpuAddress(tex_info_address);
+    ASSERT_MSG(tex_address_cpu, "Invalid GPU address");
+
+    const Texture::TextureHandle tex_handle{Memory::Read32(*tex_address_cpu)};

     Texture::FullTextureInfo tex_info{};
     tex_info.index = static_cast<u32>(offset);

     // Load the TIC data.
-    if (tex_handle.tic_id != 0) {
-        tex_info.enabled = true;
-
-        auto tic_entry = GetTICEntry(tex_handle.tic_id);
-        // TODO(Subv): Workaround for BitField's move constructor being deleted.
-        std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));
-    }
+    auto tic_entry = GetTICEntry(tex_handle.tic_id);
+    // TODO(Subv): Workaround for BitField's move constructor being deleted.
+    std::memcpy(&tex_info.tic, &tic_entry, sizeof(tic_entry));

     // Load the TSC data
-    if (tex_handle.tsc_id != 0) {
-        auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
-        // TODO(Subv): Workaround for BitField's move constructor being deleted.
-        std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));
-    }
+    auto tsc_entry = GetTSCEntry(tex_handle.tsc_id);
+    // TODO(Subv): Workaround for BitField's move constructor being deleted.
+    std::memcpy(&tex_info.tsc, &tsc_entry, sizeof(tsc_entry));

     return tex_info;
 }

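The maxwell_3d hunks above also carry the change named in the commit title: GetStageTextures() and GetStageTexture() no longer skip texture handles whose TIC id is zero, and the enabled flag that used to mark skipped entries is removed (see the gl_rasterizer and texture.h hunks further down). Below is a rough before/after sketch of that behavioral change; TextureHandle and FullTextureInfo are reduced to illustrative stand-ins for the real structs, and the Collect* functions are hypothetical.

// Rough before/after sketch of the TIC-id-zero change; the structs and the
// CollectTextures* helpers are simplified stand-ins, not the engine's code.
#include <cstdint>
#include <vector>

struct TextureHandle {
    std::uint32_t tic_id;
    std::uint32_t tsc_id;
};

struct FullTextureInfo {
    std::uint32_t index; // TIC/TSC payloads omitted in this sketch
};

// Before: handles with tic_id == 0 were never pushed (enabled stayed false).
std::vector<FullTextureInfo> CollectTexturesOld(const std::vector<TextureHandle>& handles) {
    std::vector<FullTextureInfo> textures;
    for (std::uint32_t i = 0; i < handles.size(); ++i) {
        if (handles[i].tic_id != 0) {
            textures.push_back({i});
        }
    }
    return textures;
}

// After: every handle gets its TIC/TSC entries loaded and is pushed, so a TIC
// id of zero is treated as a valid entry rather than "no texture bound".
std::vector<FullTextureInfo> CollectTexturesNew(const std::vector<TextureHandle>& handles) {
    std::vector<FullTextureInfo> textures;
    for (std::uint32_t i = 0; i < handles.size(); ++i) {
        textures.push_back({i});
    }
    return textures;
}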
@@ -39,8 +39,10 @@ void MaxwellDMA::HandleCopy() {
     const GPUVAddr source = regs.src_address.Address();
     const GPUVAddr dest = regs.dst_address.Address();

-    const VAddr source_cpu = *memory_manager.GpuToCpuAddress(source);
-    const VAddr dest_cpu = *memory_manager.GpuToCpuAddress(dest);
+    const auto source_cpu = memory_manager.GpuToCpuAddress(source);
+    const auto dest_cpu = memory_manager.GpuToCpuAddress(dest);
+    ASSERT_MSG(source_cpu, "Invalid source GPU address");
+    ASSERT_MSG(dest_cpu, "Invalid destination GPU address");

     // TODO(Subv): Perform more research and implement all features of this engine.
     ASSERT(regs.exec.enable_swizzle == 0);

@@ -64,7 +66,7 @@ void MaxwellDMA::HandleCopy() {
     // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count,
     // y_count).
     if (!regs.exec.enable_2d) {
-        Memory::CopyBlock(dest_cpu, source_cpu, regs.x_count);
+        Memory::CopyBlock(*dest_cpu, *source_cpu, regs.x_count);
         return;
     }

@@ -73,8 +75,8 @@ void MaxwellDMA::HandleCopy() {
     // rectangle. There is no need to manually flush/invalidate the regions because
     // CopyBlock does that for us.
     for (u32 line = 0; line < regs.y_count; ++line) {
-        const VAddr source_line = source_cpu + line * regs.src_pitch;
-        const VAddr dest_line = dest_cpu + line * regs.dst_pitch;
+        const VAddr source_line = *source_cpu + line * regs.src_pitch;
+        const VAddr dest_line = *dest_cpu + line * regs.dst_pitch;
         Memory::CopyBlock(dest_line, source_line, regs.x_count);
     }
     return;

@@ -87,12 +89,12 @@ void MaxwellDMA::HandleCopy() {
     const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
         // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
         // copying.
-        rasterizer.FlushRegion(source_cpu, src_size);
+        rasterizer.FlushRegion(*source_cpu, src_size);

         // We have to invalidate the destination region to evict any outdated surfaces from the
         // cache. We do this before actually writing the new data because the destination address
         // might contain a dirty surface that will have to be written back to memory.
-        rasterizer.InvalidateRegion(dest_cpu, dst_size);
+        rasterizer.InvalidateRegion(*dest_cpu, dst_size);
     };

     if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {

@@ -105,8 +107,8 @@ void MaxwellDMA::HandleCopy() {
                                    copy_size * src_bytes_per_pixel);

         Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
-                                  regs.src_params.size_x, src_bytes_per_pixel, source_cpu, dest_cpu,
-                                  regs.src_params.BlockHeight(), regs.src_params.pos_x,
+                                  regs.src_params.size_x, src_bytes_per_pixel, *source_cpu,
+                                  *dest_cpu, regs.src_params.BlockHeight(), regs.src_params.pos_x,
                                   regs.src_params.pos_y);
     } else {
         ASSERT(regs.dst_params.size_z == 1);

@@ -119,7 +121,7 @@ void MaxwellDMA::HandleCopy() {

         // If the input is linear and the output is tiled, swizzle the input and copy it over.
         Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
-                                src_bpp, dest_cpu, source_cpu, regs.dst_params.BlockHeight());
+                                src_bpp, *dest_cpu, *source_cpu, regs.dst_params.BlockHeight());
     }
 }

@@ -154,7 +154,8 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
     const VAddr base_addr{PageSlot(gpu_addr)};

     if (base_addr == static_cast<u64>(PageStatus::Allocated) ||
-        base_addr == static_cast<u64>(PageStatus::Unmapped)) {
+        base_addr == static_cast<u64>(PageStatus::Unmapped) ||
+        base_addr == static_cast<u64>(PageStatus::Reserved)) {
         return {};
     }

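With the memory_manager hunk above, GpuToCpuAddress() now also returns an empty optional for pages in the Reserved state, which is what the ASSERT_MSG checks added elsewhere guard against. A small sketch of the check follows; the sentinel values and the ToCpuAddress helper are invented for illustration, and only the fact that Reserved now also yields an empty optional is taken from the diff.

// Small sketch of the page-status check after this change. The sentinel
// values and ToCpuAddress() are assumptions made for this example only.
#include <cstdint>
#include <optional>

using VAddr = std::uint64_t;

enum class PageStatus : std::uint64_t {
    Unmapped = 0xFFFFFFFFFFFFFFFFULL,  // assumed sentinel value
    Allocated = 0xFFFFFFFFFFFFFFFEULL, // assumed sentinel value
    Reserved = 0xFFFFFFFFFFFFFFFDULL,  // assumed sentinel value
};

std::optional<VAddr> ToCpuAddress(VAddr base_addr, VAddr page_offset) {
    if (base_addr == static_cast<std::uint64_t>(PageStatus::Allocated) ||
        base_addr == static_cast<std::uint64_t>(PageStatus::Unmapped) ||
        base_addr == static_cast<std::uint64_t>(PageStatus::Reserved)) {
        // Allocated-but-unbacked, unmapped and (newly) reserved pages resolve to nothing.
        return {};
    }
    return base_addr + page_offset;
}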
@@ -19,7 +19,8 @@ OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
 GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size,
                                       std::size_t alignment, bool cache) {
     auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-    const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    ASSERT_MSG(cpu_addr, "Invalid GPU address");

     // Cache management is a big overhead, so only cache entries with a given size.
     // TODO: Figure out which size is the best for given games.

@@ -46,7 +46,9 @@ GLintptr PrimitiveAssembler::MakeQuadIndexed(Tegra::GPUVAddr gpu_addr, std::size
     auto [dst_pointer, index_offset] = buffer_cache.ReserveMemory(map_size);

     auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
-    const std::optional<VAddr> cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    const auto cpu_addr{memory_manager.GpuToCpuAddress(gpu_addr)};
+    ASSERT_MSG(cpu_addr, "Invalid GPU address");
+
     const u8* source{Memory::GetPointer(*cpu_addr)};

     for (u32 primitive = 0; primitive < count / 4; ++primitive) {

@@ -1008,10 +1008,6 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
         auto& unit = state.texture_units[current_bindpoint];

         const auto texture = maxwell3d.GetStageTexture(entry.GetStage(), entry.GetOffset());
-        if (!texture.enabled) {
-            unit.texture = 0;
-            continue;
-        }

         texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);

@@ -23,8 +23,10 @@ using VideoCommon::Shader::ProgramCode;
 static VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
     const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
     const auto& shader_config = gpu.regs.shader_config[static_cast<std::size_t>(program)];
-    return *gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
-                                               shader_config.offset);
+    const auto address = gpu.memory_manager.GpuToCpuAddress(gpu.regs.code_address.CodeAddress() +
+                                                            shader_config.offset);
+    ASSERT_MSG(address, "Invalid GPU address");
+    return *address;
 }

 /// Gets the shader program code from memory for the specified address

@@ -317,7 +317,6 @@ struct FullTextureInfo {
     u32 index;
     TICEntry tic;
     TSCEntry tsc;
-    bool enabled;
 };

 /// Returns the number of bytes per pixel of the input texture format.