2018-02-11 23:44:12 -05:00
|
|
|
// Copyright 2018 yuzu Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
|
|
|
#pragma once
|
|
|
|
|
2018-09-04 07:54:50 -04:00
|
|
|
#include <array>
|
2019-06-07 12:56:30 -04:00
|
|
|
#include <atomic>
|
2019-12-30 07:03:20 -05:00
|
|
|
#include <condition_variable>
|
2019-06-07 12:56:30 -04:00
|
|
|
#include <list>
|
2018-02-11 23:44:12 -05:00
|
|
|
#include <memory>
|
2019-06-08 16:45:25 -04:00
|
|
|
#include <mutex>
|
2018-02-11 23:44:12 -05:00
|
|
|
#include "common/common_types.h"
|
2019-06-07 12:56:30 -04:00
|
|
|
#include "core/hle/service/nvdrv/nvdata.h"
|
2018-03-23 14:58:27 -04:00
|
|
|
#include "core/hle/service/nvflinger/buffer_queue.h"
|
2020-10-26 23:07:36 -04:00
|
|
|
#include "video_core/cdma_pusher.h"
|
2018-11-23 23:20:56 -05:00
|
|
|
#include "video_core/dma_pusher.h"
|
2018-02-11 23:44:12 -05:00
|
|
|
|
2019-02-18 20:58:32 -05:00
|
|
|
/// Integral representation of a host pointer used as a key into host-side caches.
using CacheAddr = std::uintptr_t;

/// Converts a host pointer into its cache-address (integral) representation.
/// The result is only meaningful as a cache key / for FromCacheAddr round-trips.
/// [[nodiscard]]: calling this for its side effects is always a bug (it has none).
[[nodiscard]] inline CacheAddr ToCacheAddr(const void* host_ptr) {
    return reinterpret_cast<CacheAddr>(host_ptr);
}
|
|
|
|
|
2019-07-19 10:50:40 -04:00
|
|
|
/// Converts a cache address back into a host byte pointer.
/// Inverse of ToCacheAddr(); only valid for values previously produced by it.
/// [[nodiscard]]: this is a pure conversion with no side effects.
[[nodiscard]] inline u8* FromCacheAddr(CacheAddr cache_addr) {
    return reinterpret_cast<u8*>(cache_addr);
}
|
|
|
|
|
2019-02-15 22:05:17 -05:00
|
|
|
namespace Core {
|
2020-03-24 22:58:49 -04:00
|
|
|
namespace Frontend {
|
|
|
|
class EmuWindow;
|
2019-02-15 22:05:17 -05:00
|
|
|
}
|
2020-03-24 22:58:49 -04:00
|
|
|
class System;
|
|
|
|
} // namespace Core
|
2019-02-15 22:05:17 -05:00
|
|
|
|
2018-08-03 12:55:58 -04:00
|
|
|
namespace VideoCore {
|
2019-01-07 23:32:02 -05:00
|
|
|
class RendererBase;
|
2020-07-09 23:36:38 -04:00
|
|
|
class ShaderNotify;
|
2019-01-07 23:32:02 -05:00
|
|
|
} // namespace VideoCore
|
2018-08-03 12:55:58 -04:00
|
|
|
|
2018-02-11 23:44:12 -05:00
|
|
|
namespace Tegra {
|
|
|
|
|
2018-03-24 00:45:24 -04:00
|
|
|
/// Color render-target formats. The numeric values are the raw encodings the guest
/// writes into the render-target format register, so they must not be changed.
enum class RenderTargetFormat : u32 {
    NONE = 0x0, ///< No render target bound.
    // NOTE(review): the component order in this name looks like a typo for
    // R32G32B32A32_FLOAT (compare the _SINT/_UINT variants below). Renaming the
    // enumerator would break every caller, so it is left as-is; confirm against
    // the hardware format tables before fixing.
    R32B32G32A32_FLOAT = 0xC0,
    R32G32B32A32_SINT = 0xC1,
    R32G32B32A32_UINT = 0xC2,
    R16G16B16A16_UNORM = 0xC6,
    R16G16B16A16_SNORM = 0xC7,
    R16G16B16A16_SINT = 0xC8,
    R16G16B16A16_UINT = 0xC9,
    R16G16B16A16_FLOAT = 0xCA,
    R32G32_FLOAT = 0xCB,
    R32G32_SINT = 0xCC,
    R32G32_UINT = 0xCD,
    R16G16B16X16_FLOAT = 0xCE,
    B8G8R8A8_UNORM = 0xCF,
    B8G8R8A8_SRGB = 0xD0,
    A2B10G10R10_UNORM = 0xD1,
    A2B10G10R10_UINT = 0xD2,
    A8B8G8R8_UNORM = 0xD5,
    A8B8G8R8_SRGB = 0xD6,
    A8B8G8R8_SNORM = 0xD7,
    A8B8G8R8_SINT = 0xD8,
    A8B8G8R8_UINT = 0xD9,
    R16G16_UNORM = 0xDA,
    R16G16_SNORM = 0xDB,
    R16G16_SINT = 0xDC,
    R16G16_UINT = 0xDD,
    R16G16_FLOAT = 0xDE,
    B10G11R11_FLOAT = 0xE0,
    R32_SINT = 0xE3,
    R32_UINT = 0xE4,
    R32_FLOAT = 0xE5,
    R5G6B5_UNORM = 0xE8,
    A1R5G5B5_UNORM = 0xE9,
    R8G8_UNORM = 0xEA,
    R8G8_SNORM = 0xEB,
    R8G8_SINT = 0xEC,
    R8G8_UINT = 0xED,
    R16_UNORM = 0xEE,
    R16_SNORM = 0xEF,
    R16_SINT = 0xF0,
    R16_UINT = 0xF1,
    R16_FLOAT = 0xF2,
    R8_UNORM = 0xF3,
    R8_SNORM = 0xF4,
    R8_SINT = 0xF5,
    R8_UINT = 0xF6,
};
|
|
|
|
|
2018-07-02 13:42:04 -04:00
|
|
|
/// Depth/stencil buffer formats. As with RenderTargetFormat, the numeric values
/// are the raw register encodings written by the guest and must stay fixed.
enum class DepthFormat : u32 {
    D32_FLOAT = 0xA,
    D16_UNORM = 0x13,
    S8_UINT_Z24_UNORM = 0x14,
    D24X8_UNORM = 0x15,
    D24S8_UNORM = 0x16,
    D24C8_UNORM = 0x18,
    D32_FLOAT_S8X24_UINT = 0x19,
};
|
|
|
|
|
2018-09-06 09:48:08 -04:00
|
|
|
struct CommandListHeader;
|
2018-03-22 16:19:35 -04:00
|
|
|
class DebugContext;
|
|
|
|
|
2018-03-22 21:04:30 -04:00
|
|
|
/**
|
|
|
|
* Struct describing framebuffer configuration
|
|
|
|
*/
|
|
|
|
/**
 * Struct describing framebuffer configuration
 */
struct FramebufferConfig {
    /// Pixel formats the display layer can scan out. Values match the format codes
    /// used by the NVFlinger/buffer-queue interface (only these three are handled).
    enum class PixelFormat : u32 {
        A8B8G8R8_UNORM = 1,
        RGB565_UNORM = 4,
        B8G8R8A8_UNORM = 5,
    };

    VAddr address;   ///< Guest virtual address of the framebuffer data.
    u32 offset;      ///< Byte offset into the buffer at `address` — presumably; confirm against caller.
    u32 width;       ///< Width in pixels.
    u32 height;      ///< Height in pixels.
    u32 stride;      ///< Row stride; units (pixels vs bytes) not evident here — confirm at use site.
    PixelFormat pixel_format; ///< Format of the pixel data.

    /// Transform (rotation/flip) flags come straight from the NVFlinger buffer queue.
    using TransformFlags = Service::NVFlinger::BufferQueue::BufferTransformFlags;
    TransformFlags transform_flags;       ///< Rotation/flip to apply when presenting.
    Common::Rectangle<int> crop_rect;     ///< Sub-rectangle of the buffer to present.
};
|
|
|
|
|
2018-03-18 16:15:05 -04:00
|
|
|
namespace Engines {
|
|
|
|
class Fermi2D;
|
|
|
|
class Maxwell3D;
|
2018-06-10 18:02:33 -04:00
|
|
|
class MaxwellDMA;
|
2019-01-22 18:49:31 -05:00
|
|
|
class KeplerCompute;
|
2018-09-08 16:58:20 -04:00
|
|
|
class KeplerMemory;
|
2018-03-18 16:15:05 -04:00
|
|
|
} // namespace Engines
|
|
|
|
|
2018-02-11 23:44:12 -05:00
|
|
|
/// Class IDs of the GPU engines that can be bound to a command subchannel.
enum class EngineID {
    FERMI_TWOD_A = 0x902D,              // 2D engine
    MAXWELL_B = 0xB197,                 // 3D engine
    KEPLER_COMPUTE_B = 0xB1C0,          // Compute engine
    KEPLER_INLINE_TO_MEMORY_B = 0xA140, // Inline-to-memory (upload) engine
    MAXWELL_DMA_COPY_A = 0xB0B5,        // DMA copy engine
};
|
|
|
|
|
2019-03-03 23:54:16 -05:00
|
|
|
class MemoryManager;
|
|
|
|
|
2019-02-08 23:21:53 -05:00
|
|
|
/// Base class for the emulated Tegra GPU. Owns the engines, DMA/CDMA pushers,
/// memory manager and renderer, routes method calls to the bound engine, and
/// tracks syncpoint and CPU-flush state. Threading variants (sync/async) derive
/// from this and implement the pure-virtual submission hooks.
class GPU {
public:
    /// A single method (register) write as it arrives from the command processor.
    struct MethodCall {
        u32 method{};       ///< Method (register) index being written.
        u32 argument{};     ///< Value written to the method.
        u32 subchannel{};   ///< Subchannel the write is routed through.
        u32 method_count{}; ///< Methods remaining in the current batch.

        /// True when this is the final (or only) write of its batch.
        bool IsLastCall() const {
            return method_count <= 1;
        }

        MethodCall(u32 method, u32 argument, u32 subchannel = 0, u32 method_count = 0)
            : method(method), argument(argument), subchannel(subchannel),
              method_count(method_count) {}
    };

    explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
    virtual ~GPU();

    /// Binds a renderer to the GPU.
    void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);

    /// Calls a GPU method.
    void CallMethod(const MethodCall& method_call);

    /// Calls a GPU multivalue method (one method written `amount` times in a row).
    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                         u32 methods_pending);

    /// Flush all current written commands into the host GPU for execution.
    void FlushCommands();
    /// Synchronizes CPU writes with Host GPU memory.
    void SyncGuestHost();
    /// Signal the ending of command list.
    virtual void OnCommandListEnd();

    /// Request a host GPU memory flush from the CPU.
    /// @returns the fence id that identifies this request (see CurrentFlushRequestFence).
    u64 RequestFlush(VAddr addr, std::size_t size);

    /// Obtains current flush request fence id (id of the most recently issued request).
    u64 CurrentFlushRequestFence() const {
        return current_flush_fence.load(std::memory_order_relaxed);
    }

    /// Tick pending requests within the GPU.
    void TickWork();

    /// Returns a reference to the Maxwell3D GPU engine.
    Engines::Maxwell3D& Maxwell3D();

    /// Returns a const reference to the Maxwell3D GPU engine.
    const Engines::Maxwell3D& Maxwell3D() const;

    /// Returns a reference to the KeplerCompute GPU engine.
    Engines::KeplerCompute& KeplerCompute();

    /// Returns a const reference to the KeplerCompute GPU engine.
    const Engines::KeplerCompute& KeplerCompute() const;

    /// Returns a reference to the GPU memory manager.
    Tegra::MemoryManager& MemoryManager();

    /// Returns a const reference to the GPU memory manager.
    const Tegra::MemoryManager& MemoryManager() const;

    /// Returns a reference to the GPU DMA pusher.
    Tegra::DmaPusher& DmaPusher();

    /// Returns a const reference to the GPU DMA pusher.
    const Tegra::DmaPusher& DmaPusher() const;

    /// Returns a reference to the GPU CDMA pusher.
    Tegra::CDmaPusher& CDmaPusher();

    /// Returns a const reference to the GPU CDMA pusher.
    const Tegra::CDmaPusher& CDmaPusher() const;

    /// Returns a reference to the bound renderer (BindRenderer must have been called).
    VideoCore::RendererBase& Renderer() {
        return *renderer;
    }

    /// Returns a const reference to the bound renderer.
    const VideoCore::RendererBase& Renderer() const {
        return *renderer;
    }

    /// Returns a reference to the shader-build notifier.
    VideoCore::ShaderNotify& ShaderNotify() {
        return *shader_notify;
    }

    /// Returns a const reference to the shader-build notifier.
    const VideoCore::ShaderNotify& ShaderNotify() const {
        return *shader_notify;
    }

    /// Waits for the GPU to finish working.
    virtual void WaitIdle() const = 0;

    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
    void WaitFence(u32 syncpoint_id, u32 value);

    /// Advances the given syncpoint by one and services any interrupts it satisfies.
    void IncrementSyncPoint(u32 syncpoint_id);

    /// Returns the current value of the given syncpoint.
    u32 GetSyncpointValue(u32 syncpoint_id) const;

    /// Registers an interrupt to fire when `syncpoint_id` reaches `value`.
    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);

    /// Cancels a previously registered syncpoint interrupt.
    /// @returns true if a matching registration was found and removed — presumably; confirm in .cpp.
    bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);

    /// Returns the current GPU tick count (units defined by the implementation in the .cpp).
    u64 GetTicks() const;

    /// Acquires the syncpoint mutex; hold the returned lock while touching syncpoint state.
    std::unique_lock<std::mutex> LockSync() {
        return std::unique_lock{sync_mutex};
    }

    /// True when the GPU runs on its own thread (asynchronous mode).
    bool IsAsync() const {
        return is_async;
    }

    /// True when NVDEC (video decoder) emulation is enabled.
    bool UseNvdec() const {
        return use_nvdec;
    }

    /// Operation encoded in a FenceAction word.
    enum class FenceOperation : u32 {
        Acquire = 0,   ///< Wait until the syncpoint reaches the fence value.
        Increment = 1, ///< Increment the syncpoint.
    };

    /// Raw encoding of a fence action register write (operation + syncpoint id).
    union FenceAction {
        u32 raw;
        BitField<0, 1, FenceOperation> op;
        BitField<8, 24, u32> syncpoint_id;

        /// Builds the command header word for a fence action.
        static CommandHeader Build(FenceOperation op, u32 syncpoint_id) {
            FenceAction result{};
            result.op.Assign(op);
            result.syncpoint_id.Assign(syncpoint_id);
            return {result.raw};
        }
    };

    /// GPU (puller) register file. Field offsets are asserted in 32-bit words by the
    /// ASSERT_REG_POSITION checks at the bottom of this header — do not reorder fields.
    struct Regs {
        static constexpr size_t NUM_REGS = 0x40;

        union {
            struct {
                INSERT_UNION_PADDING_WORDS(0x4);
                struct {
                    u32 address_high;
                    u32 address_low;

                    /// Recombines the split 64-bit semaphore GPU virtual address.
                    GPUVAddr SemaphoreAddress() const {
                        return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
                                                     address_low);
                    }
                } semaphore_address;

                u32 semaphore_sequence;
                u32 semaphore_trigger;
                INSERT_UNION_PADDING_WORDS(0xC);

                // The pusher and the puller share the reference counter, the pusher only has read
                // access
                u32 reference_count;
                INSERT_UNION_PADDING_WORDS(0x5);

                u32 semaphore_acquire;
                u32 semaphore_release;
                u32 fence_value;
                FenceAction fence_action;
                INSERT_UNION_PADDING_WORDS(0xE2);

                // Puller state
                // NOTE(review): these live at word offset 0x100+, beyond NUM_REGS (0x40),
                // so reg_array only aliases the first 0x40 words — confirm this is intended.
                u32 acquire_mode;
                u32 acquire_source;
                u32 acquire_active;
                u32 acquire_timeout;
                u32 acquire_value;
            };
            std::array<u32, NUM_REGS> reg_array;
        };
    } regs{};

    /// Performs any additional setup necessary in order to begin GPU emulation.
    /// This can be used to launch any necessary threads and register any necessary
    /// core timing events.
    virtual void Start() = 0;

    /// Obtain the CPU Context
    virtual void ObtainContext() = 0;

    /// Release the CPU Context
    virtual void ReleaseContext() = 0;

    /// Push GPU command entries to be processed
    virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;

    /// Push GPU command buffer entries to be processed
    virtual void PushCommandBuffer(Tegra::ChCommandHeaderList& entries) = 0;

    /// Swap buffers (render frame)
    virtual void SwapBuffers(const Tegra::FramebufferConfig* framebuffer) = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
    virtual void FlushRegion(VAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be invalidated
    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;

    /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;

protected:
    /// Fires the host-side interrupt that tells the CPU a syncpoint threshold was hit.
    virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0;

private:
    // Handlers for individual puller methods (dispatched from CallPullerMethod).
    void ProcessBindMethod(const MethodCall& method_call);
    void ProcessFenceActionMethod();
    void ProcessWaitForInterruptMethod();
    void ProcessSemaphoreTriggerMethod();
    void ProcessSemaphoreRelease();
    void ProcessSemaphoreAcquire();

    /// Calls a GPU puller method.
    void CallPullerMethod(const MethodCall& method_call);

    /// Calls a GPU engine method.
    void CallEngineMethod(const MethodCall& method_call);

    /// Calls a GPU engine multivalue method.
    void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                               u32 methods_pending);

    /// Determines where the method should be executed.
    bool ExecuteMethodOnEngine(u32 method);

protected:
    Core::System& system;                           ///< Owning system instance.
    std::unique_ptr<Tegra::MemoryManager> memory_manager;
    std::unique_ptr<Tegra::DmaPusher> dma_pusher;
    std::unique_ptr<Tegra::CDmaPusher> cdma_pusher;
    std::unique_ptr<VideoCore::RendererBase> renderer; ///< Set by BindRenderer.
    const bool use_nvdec;                            ///< Whether NVDEC emulation is enabled.

private:
    /// Mapping of command subchannels to their bound engine ids
    std::array<EngineID, 8> bound_engines = {};
    /// 3D engine
    std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
    /// 2D engine
    std::unique_ptr<Engines::Fermi2D> fermi_2d;
    /// Compute engine
    std::unique_ptr<Engines::KeplerCompute> kepler_compute;
    /// DMA engine
    std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
    /// Inline memory engine
    std::unique_ptr<Engines::KeplerMemory> kepler_memory;
    /// Shader build notifier
    std::unique_ptr<VideoCore::ShaderNotify> shader_notify;

    /// Current value of each hardware syncpoint.
    std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};

    /// Pending interrupt threshold values registered per syncpoint.
    std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;

    std::mutex sync_mutex;   ///< Guards syncpoint state (see LockSync).
    std::mutex device_mutex; ///< Guards device-level submission state.

    /// Signalled when syncpoints advance; used by WaitFence.
    std::condition_variable sync_cv;

    /// One outstanding CPU-requested memory flush (see RequestFlush/TickWork).
    struct FlushRequest {
        FlushRequest(u64 fence, VAddr addr, std::size_t size)
            : fence{fence}, addr{addr}, size{size} {}
        u64 fence;        ///< Fence id identifying this request.
        VAddr addr;       ///< Start of the region to flush.
        std::size_t size; ///< Size of the region in bytes.
    };

    std::list<FlushRequest> flush_requests;    ///< Outstanding flush requests.
    std::atomic<u64> current_flush_fence{};    ///< Fence id of the newest request.
    u64 last_flush_fence{};                    ///< Fence id of the newest completed request.
    std::mutex flush_request_mutex;            ///< Guards flush_requests.

    const bool is_async; ///< Whether the GPU runs on its own thread.
};
|
|
|
|
|
2019-01-29 21:49:18 -05:00
|
|
|
// Compile-time layout checks for GPU::Regs: `position` is the register's offset in
// 32-bit words, so the byte offset must be `position * 4`. Any reordering of Regs
// fields or miscounted padding fails these asserts instead of silently corrupting
// register decoding.
#define ASSERT_REG_POSITION(field_name, position)                                                  \
    static_assert(offsetof(GPU::Regs, field_name) == position * 4,                                 \
                  "Field " #field_name " has invalid position")

ASSERT_REG_POSITION(semaphore_address, 0x4);
ASSERT_REG_POSITION(semaphore_sequence, 0x6);
ASSERT_REG_POSITION(semaphore_trigger, 0x7);
ASSERT_REG_POSITION(reference_count, 0x14);
ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
ASSERT_REG_POSITION(semaphore_release, 0x1B);
ASSERT_REG_POSITION(fence_value, 0x1C);
ASSERT_REG_POSITION(fence_action, 0x1D);

ASSERT_REG_POSITION(acquire_mode, 0x100);
ASSERT_REG_POSITION(acquire_source, 0x101);
ASSERT_REG_POSITION(acquire_active, 0x102);
ASSERT_REG_POSITION(acquire_timeout, 0x103);
ASSERT_REG_POSITION(acquire_value, 0x104);

#undef ASSERT_REG_POSITION
|
|
|
|
|
2018-02-11 23:44:12 -05:00
|
|
|
} // namespace Tegra
|