diff --git a/app/src/main/cpp/skyline/gpu/memory_manager.cpp b/app/src/main/cpp/skyline/gpu/memory_manager.cpp
index 2f4bafbe..fd9cf421 100644
--- a/app/src/main/cpp/skyline/gpu/memory_manager.cpp
+++ b/app/src/main/cpp/skyline/gpu/memory_manager.cpp
@@ -16,7 +16,7 @@ namespace skyline::gpu::vmm {
 
     std::optional<ChunkDescriptor> MemoryManager::FindChunk(ChunkState state, u64 size, u64 alignment) {
         auto chunk{std::find_if(chunks.begin(), chunks.end(), [state, size, alignment](const ChunkDescriptor &chunk) -> bool {
-            return (alignment ? util::IsAligned(chunk.address, alignment) : true) && chunk.size > size && chunk.state == state;
+            return (alignment ? util::IsAligned(chunk.virtAddr, alignment) : true) && chunk.size > size && chunk.state == state;
         })};
 
         if (chunk != chunks.end())
@@ -30,7 +30,7 @@ namespace skyline::gpu::vmm {
         for (auto chunk{chunks.begin()}; chunk != chunkEnd; chunk++) {
             if (chunk->CanContain(newChunk)) {
                 auto oldChunk{*chunk};
-                u64 newSize{newChunk.address - chunk->address};
+                u64 newSize{newChunk.virtAddr - chunk->virtAddr};
                 u64 extension{chunk->size - newSize - newChunk.size};
 
                 if (newSize == 0) {
@@ -41,16 +41,16 @@ namespace skyline::gpu::vmm {
                 }
 
                 if (extension)
-                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.address + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.pointer + newSize + newChunk.size) : 0, oldChunk.state));
+                    chunks.insert(std::next(chunk), ChunkDescriptor(newChunk.virtAddr + newChunk.size, extension, (oldChunk.state == ChunkState::Mapped) ? (oldChunk.cpuPtr + newSize + newChunk.size) : 0, oldChunk.state));
 
-                return newChunk.address;
-            } else if (chunk->address + chunk->size > newChunk.address) {
-                chunk->size = newChunk.address - chunk->address;
+                return newChunk.virtAddr;
+            } else if (chunk->virtAddr + chunk->size > newChunk.virtAddr) {
+                chunk->size = newChunk.virtAddr - chunk->virtAddr;
 
                 // Deletes all chunks that are within the chunk being inserted and split the final one
                 auto tailChunk{std::next(chunk)};
                 while (tailChunk != chunkEnd) {
-                    if (tailChunk->address + tailChunk->size >= newChunk.address + newChunk.size)
+                    if (tailChunk->virtAddr + tailChunk->size >= newChunk.virtAddr + newChunk.size)
                         break;
 
                     tailChunk = chunks.erase(tailChunk);
@@ -61,11 +61,11 @@ namespace skyline::gpu::vmm {
                 if (tailChunk == chunkEnd)
                     break;
 
-                u64 chunkSliceOffset{newChunk.address + newChunk.size - tailChunk->address};
-                tailChunk->address += chunkSliceOffset;
+                u64 chunkSliceOffset{newChunk.virtAddr + newChunk.size - tailChunk->virtAddr};
+                tailChunk->virtAddr += chunkSliceOffset;
                 tailChunk->size -= chunkSliceOffset;
                 if (tailChunk->state == ChunkState::Mapped)
-                    tailChunk->pointer += chunkSliceOffset;
+                    tailChunk->cpuPtr += chunkSliceOffset;
 
                 // If the size of the head chunk is zero then we can directly replace it with our new one rather than inserting it
                 auto headChunk{std::prev(tailChunk)};
@@ -74,7 +74,7 @@ namespace skyline::gpu::vmm {
                 else
                     chunks.insert(std::next(headChunk), newChunk);
 
-                return newChunk.address;
+                return newChunk.virtAddr;
             }
         }
 
@@ -94,44 +94,44 @@ namespace skyline::gpu::vmm {
         return InsertChunk(chunk);
     }
 
-    u64 MemoryManager::ReserveFixed(u64 address, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    u64 MemoryManager::ReserveFixed(u64 virtAddr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
             return 0;
 
         size = util::AlignUp(size, constant::GpuPageSize);
-        return InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Reserved));
+        return InsertChunk(ChunkDescriptor(virtAddr, size, nullptr, ChunkState::Reserved));
     }
 
-    u64 MemoryManager::MapAllocate(u8 *pointer, u64 size) {
+    u64 MemoryManager::MapAllocate(u8 *cpuPtr, u64 size) {
         size = util::AlignUp(size, constant::GpuPageSize);
         auto mappedChunk{FindChunk(ChunkState::Unmapped, size)};
         if (!mappedChunk)
             return 0;
 
         auto chunk{*mappedChunk};
-        chunk.pointer = pointer;
+        chunk.cpuPtr = cpuPtr;
         chunk.size = size;
         chunk.state = ChunkState::Mapped;
 
         return InsertChunk(chunk);
     }
 
-    u64 MemoryManager::MapFixed(u64 address, u8 *pointer, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    u64 MemoryManager::MapFixed(u64 virtAddr, u8 *cpuPtr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
            return false;
 
         size = util::AlignUp(size, constant::GpuPageSize);
-        return InsertChunk(ChunkDescriptor(address, size, pointer, ChunkState::Mapped));
+        return InsertChunk(ChunkDescriptor(virtAddr, size, cpuPtr, ChunkState::Mapped));
     }
 
-    bool MemoryManager::Unmap(u64 address, u64 size) {
-        if (!util::IsAligned(address, constant::GpuPageSize))
+    bool MemoryManager::Unmap(u64 virtAddr, u64 size) {
+        if (!util::IsAligned(virtAddr, constant::GpuPageSize))
             return false;
 
         try {
-            InsertChunk(ChunkDescriptor(address, size, 0, ChunkState::Unmapped));
+            InsertChunk(ChunkDescriptor(virtAddr, size, 0, ChunkState::Unmapped));
         } catch (const std::exception &e) {
             return false;
         }
@@ -139,19 +139,19 @@ namespace skyline::gpu::vmm {
         return true;
     }
 
-    void MemoryManager::Read(u8 *destination, u64 address, u64 size) const {
-        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
+    void MemoryManager::Read(u8 *destination, u64 virtAddr, u64 size) const {
+        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
+            return address < chunk.virtAddr;
         })};
 
         if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-            throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+            throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
 
         chunk--;
 
         u64 initialSize{size};
-        u64 chunkOffset{address - chunk->address};
-        u8 *source{chunk->pointer + chunkOffset};
+        u64 chunkOffset{virtAddr - chunk->virtAddr};
+        u8 *source{chunk->cpuPtr + chunkOffset};
         u64 sourceSize{std::min(chunk->size - chunkOffset, size)};
 
         // A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@@ -161,27 +161,27 @@ namespace skyline::gpu::vmm {
             size -= sourceSize;
             if (size) {
                 if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-                    throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+                    throw exception("Failed to read region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
 
-                source = chunk->pointer;
+                source = chunk->cpuPtr;
                 sourceSize = std::min(chunk->size, size);
             }
         }
     }
 
-    void MemoryManager::Write(u8 *source, u64 address, u64 size) const {
-        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
+    void MemoryManager::Write(u8 *source, u64 virtAddr, u64 size) const {
+        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), virtAddr, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
+            return address < chunk.virtAddr;
         })};
 
         if (chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-            throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+            throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
 
         chunk--;
 
         u64 initialSize{size};
-        u64 chunkOffset{address - chunk->address};
-        u8 *destination{chunk->pointer + chunkOffset};
+        u64 chunkOffset{virtAddr - chunk->virtAddr};
+        u8 *destination{chunk->cpuPtr + chunkOffset};
         u64 destinationSize{std::min(chunk->size - chunkOffset, size)};
 
         // A continuous region in the GPU address space may be made up of several discontinuous regions in physical memory so we have to iterate over all chunks
@@ -191,9 +191,9 @@ namespace skyline::gpu::vmm {
             size -= destinationSize;
             if (size) {
                 if (++chunk == chunks.end() || chunk->state != ChunkState::Mapped)
-                    throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", address, size);
+                    throw exception("Failed to write region in GPU address space: Address: 0x{:X}, Size: 0x{:X}", virtAddr, size);
 
-                destination = chunk->pointer;
+                destination = chunk->cpuPtr;
                 destinationSize = std::min(chunk->size, size);
             }
         }
diff --git a/app/src/main/cpp/skyline/gpu/memory_manager.h b/app/src/main/cpp/skyline/gpu/memory_manager.h
index 12759eb9..e127c19c 100644
--- a/app/src/main/cpp/skyline/gpu/memory_manager.h
+++ b/app/src/main/cpp/skyline/gpu/memory_manager.h
@@ -11,30 +11,30 @@ namespace skyline {
     }
 
     namespace gpu::vmm {
-        enum ChunkState {
+        enum class ChunkState {
            Unmapped, //!< The chunk is unmapped
            Reserved, //!< The chunk is reserved
            Mapped //!< The chunk is mapped and a CPU side address is present
        };
 
        struct ChunkDescriptor {
-            u64 address; //!< The address of the chunk in the GPU address space
+            u64 virtAddr; //!< The address of the chunk in the virtual address space
            u64 size; //!< The size of the chunk in bytes
-            u8 *pointer; //!< A pointer to the chunk in the CPU address space (if mapped)
+            u8 *cpuPtr; //!< A pointer to the chunk in the application's address space (if mapped)
            ChunkState state;
 
-            ChunkDescriptor(u64 address, u64 size, u8 *pointer, ChunkState state) : address(address), size(size), pointer(pointer), state(state) {}
+            ChunkDescriptor(u64 virtAddr, u64 size, u8 *cpuPtr, ChunkState state) : virtAddr(virtAddr), size(size), cpuPtr(cpuPtr), state(state) {}
 
            /**
             * @return If the given chunk can be contained wholly within this chunk
             */
            inline bool CanContain(const ChunkDescriptor &chunk) {
-                return (chunk.address >= this->address) && ((this->size + this->address) >= (chunk.size + chunk.address));
+                return (chunk.virtAddr >= this->virtAddr) && ((this->size + this->virtAddr) >= (chunk.size + chunk.virtAddr));
            }
        };
 
        /**
-         * @brief The MemoryManager class handles the mapping of the GPU address space
+         * @brief The MemoryManager class handles mapping between a virtual address space and an application's address space
         */
        class MemoryManager {
          private:
@@ -42,18 +42,18 @@ namespace skyline {
            std::vector<ChunkDescriptor> chunks;
 
            /**
-             * @brief Finds a chunk of the specified type in the GPU address space that is larger than the given size
+             * @brief Finds a chunk in the virtual address space that meets the given requirements
             * @param state The state of the chunk to find
             * @param size The minimum size of the chunk to find
-             * @param alignment The alignment of the chunk to find
-             * @return The first unmapped chunk in the GPU address space that fulfils the requested conditions
+             * @param alignment The minimum alignment of the chunk to find
+             * @return The first applicable chunk
             */
            std::optional<ChunkDescriptor> FindChunk(ChunkState state, u64 size, u64 alignment = 0);
 
            /**
             * @brief Inserts a chunk into the chunk list, resizing and splitting as necessary
             * @param newChunk The chunk to insert
-             * @return The base virtual GPU address of the inserted chunk
+             * @return The base virtual address of the inserted chunk
             */
            u64 InsertChunk(const ChunkDescriptor &newChunk);
 
@@ -61,81 +61,82 @@ namespace skyline {
            MemoryManager(const DeviceState &state);
 
            /**
-             * @brief Reserves a region of the GPU address space so it will not be chosen automatically when mapping
+             * @brief Reserves a region of the virtual address space so it will not be chosen automatically when mapping
             * @param size The size of the region to reserve
             * @param alignment The alignment of the region to reserve
-             * @return The virtual GPU base address of the region base
+             * @return The base virtual address of the reserved region
             */
            u64 ReserveSpace(u64 size, u64 alignment);
 
            /**
-             * @brief Reserves a fixed region of the GPU address space so it will not be chosen automatically when mapping
-             * @param address The virtual base address of the region to allocate
+             * @brief Reserves a fixed region of the virtual address space so it will not be chosen automatically when mapping
+             * @param virtAddr The virtual base address of the region to allocate
             * @param size The size of the region to allocate
-             * @return The virtual address of the region base
+             * @return The base virtual address of the reserved region
             */
-            u64 ReserveFixed(u64 address, u64 size);
+            u64 ReserveFixed(u64 virtAddr, u64 size);
 
            /**
-             * @brief Maps a physical CPU memory region to an automatically chosen virtual memory region
-             * @param pointer A pointer to the region to be mapped into the GPU's address space
+             * @brief Maps a CPU memory region into an automatically chosen region of the virtual address space
+             * @param cpuPtr A pointer to the region to be mapped into the virtual address space
             * @param size The size of the region to map
-             * @return The virtual address of the region base
+             * @return The base virtual address of the mapped region
             */
-            u64 MapAllocate(u8 *pointer, u64 size);
+            u64 MapAllocate(u8 *cpuPtr, u64 size);
 
            /**
-             * @brief Maps a physical CPU memory region to a fixed virtual memory region
-             * @param address The target virtual address of the region
-             * @param pointer A pointer to the region to be mapped into the GPU's address space
+             * @brief Maps a CPU memory region to a fixed region in the virtual address space
+             * @param virtAddr The target virtual address of the region
+             * @param cpuPtr A pointer to the region to be mapped into the virtual address space
             * @param size The size of the region to map
-             * @return The virtual address of the region base
+             * @return The base virtual address of the mapped region
             */
-            u64 MapFixed(u64 address, u8 *pointer, u64 size);
+            u64 MapFixed(u64 virtAddr, u8 *cpuPtr, u64 size);
 
            /**
-             * @brief Unmaps all chunks in the given region from the GPU address space
+             * @brief Unmaps all chunks in the given region from the virtual address space
             * @return Whether the operation succeeded
             */
-            bool Unmap(u64 address, u64 size);
+            bool Unmap(u64 virtAddr, u64 size);
 
-            void Read(u8 *destination, u64 address, u64 size) const;
+
+            void Read(u8 *destination, u64 virtAddr, u64 size) const;
 
            /**
-             * @brief Reads in a span from a region of the GPU virtual address space
+             * @brief Reads in a span from a region of the virtual address space
             */
            template<typename T>
-            void Read(span<T> destination, u64 address) const {
-                Read(reinterpret_cast<u8 *>(destination.data()), address, destination.size_bytes());
+            void Read(span<T> destination, u64 virtAddr) const {
+                Read(reinterpret_cast<u8 *>(destination.data()), virtAddr, destination.size_bytes());
            }
 
            /**
-             * @brief Reads in an object from a region of the GPU virtual address space
+             * @brief Reads in an object from a region of the virtual address space
             * @tparam T The type of object to return
             */
            template<typename T>
-            T Read(u64 address) const {
+            T Read(u64 virtAddr) const {
                T obj;
-                Read(reinterpret_cast<u8 *>(&obj), address, sizeof(T));
+                Read(reinterpret_cast<u8 *>(&obj), virtAddr, sizeof(T));
                return obj;
            }
 
-            void Write(u8 *source, u64 address, u64 size) const;
+            void Write(u8 *source, u64 virtAddr, u64 size) const;
 
            /**
-             * @brief Writes out a span to a region of the GPU virtual address space
+             * @brief Writes out a span to a region of the virtual address space
             */
            template<typename T>
-            void Write(span<T> source, u64 address) const {
-                Write(reinterpret_cast<u8 *>(source.data()), address, source.size_bytes());
+            void Write(span<T> source, u64 virtAddr) const {
+                Write(reinterpret_cast<u8 *>(source.data()), virtAddr, source.size_bytes());
            }
 
            /**
-             * @brief Reads in an object from a region of the GPU virtual address space
+             * @brief Writes out an object to a region of the virtual address space
             */
            template<typename T>
-            void Write(T source, u64 address) const {
-                Write(reinterpret_cast<u8 *>(&source), address, sizeof(T));
+            void Write(T source, u64 virtAddr) const {
+                Write(reinterpret_cast<u8 *>(&source), virtAddr, sizeof(T));
            }
        };
    }
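For reference, this is roughly how the renamed interface reads at a call site. The sketch below is illustrative and not part of the patch; it assumes a constructed MemoryManager (in practice the instance owned by the GPU state) plus a caller-provided guest buffer, and it only uses functions declared in the header above.

// Illustrative sketch, not part of the patch: exercises the renamed MemoryManager API.
// `memoryManager` and `cpuPtr` are hypothetical caller-supplied names, not from the patch.
#include "memory_manager.h" // adjust the include path to match the build setup

skyline::u64 MapAndReadBack(skyline::gpu::vmm::MemoryManager &memoryManager, skyline::u8 *cpuPtr, skyline::u64 size) {
    // Let the manager pick a free region of the virtual address space and back it with cpuPtr
    skyline::u64 virtAddr{memoryManager.MapAllocate(cpuPtr, size)};
    if (!virtAddr)
        return 0; // No unmapped chunk large enough was found

    // Round-trip a value through the mapping using the typed helpers
    memoryManager.Write<skyline::u32>(0xCAFE, virtAddr);
    skyline::u32 value{memoryManager.Read<skyline::u32>(virtAddr)};

    memoryManager.Unmap(virtAddr, size);
    return value;
}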
diff --git a/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp b/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp
index 09e47d48..fb996f90 100644
--- a/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp
+++ b/app/src/main/cpp/skyline/services/hosbinder/GraphicBufferProducer.cpp
@@ -148,7 +148,7 @@ namespace skyline::service::hosbinder {
                 throw exception("Unknown pixel format used for FB");
         }
 
-        auto texture{std::make_shared<gpu::GuestTexture>(state, nvBuffer->pointer + gbpBuffer.offset, gpu::texture::Dimensions(gbpBuffer.width, gbpBuffer.height), format, gpu::texture::TileMode::Block, gpu::texture::TileConfig{.surfaceWidth = static_cast(gbpBuffer.stride), .blockHeight = static_cast(1U << gbpBuffer.blockHeightLog2), .blockDepth = 1})};
+        auto texture{std::make_shared<gpu::GuestTexture>(state, nvBuffer->ptr + gbpBuffer.offset, gpu::texture::Dimensions(gbpBuffer.width, gbpBuffer.height), format, gpu::texture::TileMode::Block, gpu::texture::TileConfig{.surfaceWidth = static_cast(gbpBuffer.stride), .blockHeight = static_cast(1U << gbpBuffer.blockHeightLog2), .blockDepth = 1})};
 
         queue[data.slot] = std::make_shared<Buffer>(gbpBuffer, texture->InitializeTexture());
         state.gpu->presentation.bufferEvent->Signal();
diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.cpp b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.cpp
index 1101b0b4..0a31e9ef 100644
--- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.cpp
@@ -98,7 +98,7 @@ namespace skyline::service::nvdrv::device {
                }
 
                u64 gpuAddress{data.offset + data.bufferOffset};
-                u8 *cpuPtr{region->second.cpuPtr + data.bufferOffset};
+                u8 *cpuPtr{region->second.ptr + data.bufferOffset};
 
                if (state.gpu->memoryManager.MapFixed(gpuAddress, cpuPtr, data.mappingSize)) {
                    state.logger->Warn("Failed to remap GPU address space region: 0x{:X}", gpuAddress);
@@ -108,20 +108,20 @@ namespace skyline::service::nvdrv::device {
                return NvStatus::Success;
            }
 
-            u8 *mapPointer{data.bufferOffset + mapping->pointer};
-            u64 mapSize{data.mappingSize ? data.mappingSize : mapping->size};
+            u8 *cpuPtr{data.bufferOffset + mapping->ptr};
+            u64 size{data.mappingSize ? data.mappingSize : mapping->size};
 
            if (data.flags.fixed)
-                data.offset = state.gpu->memoryManager.MapFixed(data.offset, mapPointer, mapSize);
+                data.offset = state.gpu->memoryManager.MapFixed(data.offset, cpuPtr, size);
            else
-                data.offset = state.gpu->memoryManager.MapAllocate(mapPointer, mapSize);
+                data.offset = state.gpu->memoryManager.MapAllocate(cpuPtr, size);
 
            if (data.offset == 0) {
                state.logger->Warn("Failed to map GPU address space region!");
                return NvStatus::BadParameter;
            }
 
-            regionMap[data.offset] = {mapPointer, mapSize, data.flags.fixed};
+            regionMap[data.offset] = {cpuPtr, size, data.flags.fixed};
 
            return NvStatus::Success;
        } catch (const std::out_of_range &) {
@@ -176,17 +176,17 @@ namespace skyline::service::nvdrv::device {
        constexpr u32 MinAlignmentShift{0x10}; // This shift is applied to all addresses passed to Remap
 
        auto entries{buffer.cast<Entry>()};
-        for (auto entry : entries) {
+        for (const auto &entry : entries) {
            try {
                auto driver{nvdrv::driver.lock()};
                auto nvmap{driver->nvMap.lock()};
                auto mapping{nvmap->GetObject(entry.nvmapHandle)};
 
-                u64 mapAddress{static_cast<u64>(entry.gpuOffset) << MinAlignmentShift};
-                u8 *mapPointer{mapping->pointer + (static_cast<u64>(entry.mapOffset) << MinAlignmentShift)};
-                u64 mapSize{static_cast<u64>(entry.pages) << MinAlignmentShift};
+                u64 virtAddr{static_cast<u64>(entry.gpuOffset) << MinAlignmentShift};
+                u8 *cpuPtr{mapping->ptr + (static_cast<u64>(entry.mapOffset) << MinAlignmentShift)};
+                u64 size{static_cast<u64>(entry.pages) << MinAlignmentShift};
 
-                state.gpu->memoryManager.MapFixed(mapAddress, mapPointer, mapSize);
+                state.gpu->memoryManager.MapFixed(virtAddr, cpuPtr, size);
            } catch (const std::out_of_range &) {
                state.logger->Warn("Invalid NvMap handle: 0x{:X}", entry.nvmapHandle);
                return NvStatus::BadParameter;
diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.h b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.h
index 31a65c4d..08e34d9d 100644
--- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.h
+++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost_as_gpu.h
@@ -13,7 +13,7 @@ namespace skyline::service::nvdrv::device {
    class NvHostAsGpu : public NvDevice {
      private:
        struct AddressSpaceRegion {
-            u8 *cpuPtr;
+            u8 *ptr;
            u64 size;
            bool fixed;
        };
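One detail worth noting from the Remap path above: gpuOffset, mapOffset, and pages are expressed in 64 KiB (1 << 0x10 byte) units and are widened to u64 before being shifted by MinAlignmentShift, so the u32 fields can still address the full virtual address space. The sketch below only illustrates that arithmetic; the field widths here are assumptions, and the authoritative Entry layout lives in nvhost_as_gpu.cpp.

// Illustrative sketch, not part of the patch: the address arithmetic Remap applies to an
// entry before calling MemoryManager::MapFixed. RemapEntrySketch is a hypothetical stand-in
// for the real Entry struct defined in nvhost_as_gpu.cpp.
#include <cstdint>

constexpr std::uint32_t MinAlignmentShift{0x10}; // Remap works in 1 << 0x10 = 64 KiB units

struct RemapEntrySketch {
    std::uint32_t nvmapHandle; // NvMap object backing the mapping
    std::uint32_t gpuOffset;   // Position in the virtual address space, in 64 KiB units
    std::uint32_t mapOffset;   // Offset into the NvMap object, in 64 KiB units
    std::uint32_t pages;       // Length of the mapping, in 64 KiB units
};

// e.g. gpuOffset 0x3 -> virtual address 0x30000, pages 0x2 -> a 0x20000 byte mapping
constexpr std::uint64_t VirtAddrOf(const RemapEntrySketch &entry) {
    return static_cast<std::uint64_t>(entry.gpuOffset) << MinAlignmentShift;
}

constexpr std::uint64_t SizeOf(const RemapEntrySketch &entry) {
    return static_cast<std::uint64_t>(entry.pages) << MinAlignmentShift;
}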
0x{:X}", data.handle, data.heapMask, data.flags, data.align, data.kind, data.ptr); return NvStatus::Success; } catch (const std::out_of_range &) { state.logger->Warn("Invalid NvMap handle: 0x{:X}", data.handle); @@ -73,7 +73,7 @@ namespace skyline::service::nvdrv::device { struct Data { u32 handle; // In u32 _pad0_; - u8 *pointer; // Out + u8 *ptr; // Out u32 size; // Out u32 flags; // Out } &data = buffer.as(); @@ -82,17 +82,17 @@ namespace skyline::service::nvdrv::device { try { auto &object{maps.at(data.handle - 1)}; if (object.use_count() > 1) { - data.pointer = object->pointer; + data.ptr = object->ptr; data.flags = 0x0; } else { - data.pointer = nullptr; + data.ptr = nullptr; data.flags = 0x1; // Not free yet } data.size = object->size; object = nullptr; - state.logger->Debug("Handle: 0x{:X} -> Pointer: 0x{:X}, Size: 0x{:X}, Flags: 0x{:X}", data.handle, data.pointer, data.size, data.flags); + state.logger->Debug("Handle: 0x{:X} -> Pointer: 0x{:X}, Size: 0x{:X}, Flags: 0x{:X}", data.handle, data.ptr, data.size, data.flags); return NvStatus::Success; } catch (const std::out_of_range &) { state.logger->Warn("Invalid NvMap handle: 0x{:X}", data.handle); diff --git a/app/src/main/cpp/skyline/services/nvdrv/devices/nvmap.h b/app/src/main/cpp/skyline/services/nvdrv/devices/nvmap.h index 57f9d072..4784ee70 100644 --- a/app/src/main/cpp/skyline/services/nvdrv/devices/nvmap.h +++ b/app/src/main/cpp/skyline/services/nvdrv/devices/nvmap.h @@ -18,7 +18,7 @@ namespace skyline::service::nvdrv::device { struct NvMapObject { u32 id; u32 size; - u8 *pointer{}; + u8 *ptr{}; u32 flags{}; //!< The flag of the memory (0 = Read Only, 1 = Read-Write) u32 align{}; u32 heapMask{}; //!< This is set during Alloc and returned during Param