Address feedback

Billy Laws 2021-09-21 21:01:44 +01:00 committed by Mark
parent 8db2cf29f2
commit a18f1aa889
15 changed files with 81 additions and 122 deletions

View File

@@ -84,8 +84,6 @@ namespace skyline {
     /**
      * @brief A wrapper around std::optional that also stores a HOS result code
-     * @tparam ValueType The object type to hold
-     * @tparam ResultType The result type to hold
      */
     template<typename ValueType, typename ResultType = Result>
     class ResultValue {

View File

@@ -14,10 +14,10 @@ namespace skyline {
      * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
      */
     template<typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, bool PaContigSplit, size_t AddressSpaceBits> requires AddressSpaceValid<VaType, AddressSpaceBits>
-    class FlatAddressSpaceMap {
+    extern class FlatAddressSpaceMap {
       private:
         /**
-         * @brief Represents a block of memory in the AS
+         * @brief Represents a block of memory in the AS, the physical mapping is contiguous until another block with a different phys address is hit
          */
         struct Block {
             VaType virt{UnmappedVa}; //!< VA of the block
@@ -70,15 +70,15 @@
         FlatAddressSpaceMap() = default;
-        /**
-         * @brief Locked version of MapLocked
-         */
-        void Map(VaType virt, PaType phys, VaType size, bool flag = {});
-        /**
-         * @brief Locked version of UnmapLocked
-         */
-        void Unmap(VaType virt, VaType size);
+        void Map(VaType virt, PaType phys, VaType size, bool flag = {}) {
+            std::scoped_lock lock(blockMutex);
+            MapLocked(virt, phys, size, flag);
+        }
+        void Unmap(VaType virt, VaType size) {
+            std::scoped_lock lock(blockMutex);
+            UnmapLocked(virt, size);
+        }
     };
     /**
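For reference, the Map/Unmap wrappers moved into the header above follow a common lock-then-delegate pattern: the public method acquires the block mutex and forwards to the *Locked variant, which assumes the caller already holds it. A minimal standalone sketch of that pattern (simplified types, not the actual skyline template):

    #include <mutex>

    class ExampleMap {
      public:
        // Public entry point: take the lock, then forward to the unlocked worker
        void Map(int virt, int phys, int size) {
            std::scoped_lock lock(blockMutex);
            MapLocked(virt, phys, size);
        }

        void Unmap(int virt, int size) {
            std::scoped_lock lock(blockMutex);
            UnmapLocked(virt, size);
        }

      private:
        std::mutex blockMutex;

        // Used directly by callers that already hold blockMutex
        void MapLocked(int virt, int phys, int size) { /* mutate the sorted block vector */ }
        void UnmapLocked(int virt, int size) { /* mutate the sorted block vector */ }
    };

This also explains the AllocateFixed/Free change further down, which now calls the locking Map/Unmap instead of the *Locked variants.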

View File

@@ -23,11 +23,11 @@ namespace skyline {
         VaType virtEnd{virt + size};
         if (virtEnd > vaLimit)
-            throw exception("Trying to map a block past the VA limit!");
+            throw exception("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", virtEnd, vaLimit);
         auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
         if (blockEndSuccessor == blocks.begin())
-            throw exception("Trying to map a block before the VA start!");
+            throw exception("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd);
         auto blockEndPredecessor{std::prev(blockEndSuccessor)};
@@ -68,11 +68,11 @@
         // Walk the block vector to find the start successor as this is more efficient than another binary search in most scenarios
         while (std::prev(blockStartSuccessor)->virt >= virt)
-            std::advance(blockStartSuccessor, -1);
+            blockStartSuccessor--;
         // Check that the start successor is either the end block or something in between
         if (blockStartSuccessor->virt > virtEnd) {
-            throw exception("Unsorted block in AS map!");
+            throw exception("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
         } else if (blockStartSuccessor->virt == virtEnd) {
             // We need to create a new block as there are none spare that we would overwrite
             blocks.insert(blockStartSuccessor, Block(virt, phys, flag));
@@ -99,17 +99,17 @@
         VaType virtEnd{virt + size};
         if (virtEnd > vaLimit)
-            throw exception("Trying to unmap a block past the VA limit!");
+            throw exception("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", virtEnd, vaLimit);
         auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
         if (blockEndSuccessor == blocks.begin())
-            throw exception("Trying to unmap a block before the VA start!");
+            throw exception("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd);
         auto blockEndPredecessor{std::prev(blockEndSuccessor)};
         auto walkBackToPredecessor{[&](auto iter) {
             while (iter->virt >= virt)
-                std::advance(iter, -1);
+                iter--;
             return iter;
         }};
@@ -174,7 +174,7 @@
         auto blockStartSuccessor{std::next(blockStartPredecessor)};
         if (blockStartSuccessor->virt > virtEnd) {
-            throw exception("Unsorted block in AS map!");
+            throw exception("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
         } else if (blockStartSuccessor->virt == virtEnd) {
             // There are no blocks between the start and the end that would let us skip inserting a new one for head
@@ -192,23 +192,13 @@
             // Erase overwritten blocks, skipping the first one as we have written the unmapped start block there
             if (auto eraseStart{std::next(blockStartSuccessor)}; blockStartSuccessor != blockEndPredecessor) {
                 if (eraseStart == blockEndPredecessor)
-                    throw exception("Unexpected Memory Manager state!");
+                    throw exception("Trying to erase the end block of a newly unmapped region!");
                 blocks.erase(eraseStart, blockEndPredecessor);
             }
         }
     }
-    MAP_MEMBER(void)::Map(VaType virt, PaType phys, VaType size, bool flag) {
-        std::scoped_lock lock(blockMutex);
-        MapLocked(virt, phys, size, flag);
-    }
-    MAP_MEMBER(void)::Unmap(VaType virt, VaType size) {
-        std::scoped_lock lock(blockMutex);
-        UnmapLocked(virt, size);
-    }
     MM_MEMBER(void)::Read(u8 *destination, VaType virt, VaType size) {
         std::scoped_lock lock(this->blockMutex);
@@ -231,7 +221,7 @@
                 if (predecessor->flag) // Sparse mapping
                     std::memset(destination, 0, blockReadSize);
                 else
-                    throw exception("Page fault at: 0x{:X}", predecessor->virt);
+                    throw exception("Page fault at 0x{:X}", predecessor->virt);
             } else {
                 std::memcpy(destination, blockPhys, blockReadSize);
             }
@@ -267,7 +257,7 @@
         while (size) {
             if (predecessor->phys == nullptr) {
                 if (!predecessor->flag) // Sparse mappings allow unmapped writes
-                    throw exception("Page fault at: 0x{:X}", predecessor->virt);
+                    throw exception("Page fault at 0x{:X}", predecessor->virt);
             } else {
                 std::memcpy(blockPhys, source, blockWriteSize);
             }
@@ -350,10 +340,10 @@
     }
     ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
-        this->MapLocked(virt, true, size);
+        this->Map(virt, true, size);
     }
     ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
-        this->UnmapLocked(virt, size);
+        this->Unmap(virt, size);
     }
 }
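The mapping code in this file keeps `blocks` sorted by starting VA, which is what makes the `std::lower_bound` calls above work: the predecessor of the first block starting at or after a given address governs everything below it. A rough self-contained sketch of that lookup (hypothetical simplified Block, not the real templated one):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Block {
        std::uint64_t virt; // Starting VA of the block
        bool operator<(std::uint64_t va) const { return virt < va; } // Lets std::lower_bound compare blocks against a raw VA
    };

    // Returns the block governing the last byte of [virt, virtEnd), i.e. the 'blockEndPredecessor' above,
    // or nullptr when the range starts before the first block
    const Block *FindEndPredecessor(const std::vector<Block> &blocks, std::uint64_t virtEnd) {
        auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
        if (blockEndSuccessor == blocks.begin())
            return nullptr; // Mirrors the "before the VA start" exception in MapLocked/UnmapLocked
        return &*std::prev(blockEndSuccessor);
    }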

View File

@@ -183,6 +183,8 @@ namespace skyline::signal {
         std::call_once(signalHandlerOnce[signal], [signal, &action]() {
             struct sigaction oldAction;
             Sigaction(signal, &action, &oldAction);
+            if (oldAction.sa_flags && oldAction.sa_flags != action.sa_flags)
+                throw exception("Old sigaction flags aren't equivalent to the replaced signal: {:#b} | {:#b}", oldAction.sa_flags, action.sa_flags);
             DefaultSignalHandlers.at(signal).function = (oldAction.sa_flags & SA_SIGINFO) ? oldAction.sa_sigaction : reinterpret_cast<void (*)(int, struct siginfo *, void *)>(oldAction.sa_handler);
         });
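The new sanity check above guards the handler-chaining logic on the next line: `struct sigaction` exposes the previous handler through `sa_sigaction` or `sa_handler` depending on whether `SA_SIGINFO` was set, so the saved flags have to be consulted before picking one. A small POSIX-only sketch of that selection (hypothetical helper, mirroring the cast used above):

    #include <csignal>

    using SiginfoHandler = void (*)(int, siginfo_t *, void *);

    // Picks the correct union member of the previously installed handler based on its flags
    SiginfoHandler ExtractOldHandler(const struct sigaction &oldAction) {
        if (oldAction.sa_flags & SA_SIGINFO)
            return oldAction.sa_sigaction; // Three-argument handler was installed
        // One-argument handler; cast to the wider signature like the code above does
        return reinterpret_cast<SiginfoHandler>(oldAction.sa_handler);
    }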

View File

@ -16,7 +16,6 @@ namespace skyline::service {
FunctionNotImplemented = 38, // ENOSYS FunctionNotImplemented = 38, // ENOSYS
NotSupported = 95, // EOPNOTSUPP, ENOTSUP NotSupported = 95, // EOPNOTSUPP, ENOTSUP
TimedOut = 110, // ETIMEDOUT TimedOut = 110, // ETIMEDOUT
}; };
template<typename ValueType> template<typename ValueType>

View File

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT OR MPL-2.0
 // Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
+#include <numeric>
 #include <kernel/types/KProcess.h>
 #include "INvDrvServices.h"
 #include "driver.h"
@@ -18,7 +19,7 @@ namespace skyline::service::nvdrv {
     INvDrvServices::INvDrvServices(const DeviceState &state, ServiceManager &manager, Driver &driver, const SessionPermissions &perms) : BaseService(state, manager), driver(driver), ctx(SessionContext{.perms = perms}) {}
     Result INvDrvServices::Open(type::KSession &session, ipc::IpcRequest &request, ipc::IpcResponse &response) {
-        constexpr FileDescriptor SessionFdLimit{sizeof(u64) * 2 * 8}; //!< Nvdrv uses two 64 bit variables to store a bitset
+        constexpr FileDescriptor SessionFdLimit{std::numeric_limits<u64>::digits * 2}; //!< Nvdrv uses two 64 bit variables to store a bitset
         auto path{request.inputBuf.at(0).as_string(true)};
         if (path.empty() || nextFdIndex == SessionFdLimit) {
@@ -100,7 +101,7 @@ namespace skyline::service::nvdrv {
         auto fd{request.Pop<FileDescriptor>()};
         auto ioctl{request.Pop<IoctlDescriptor>()};
-        // The inline buffer is technically not required
+        // Inline buffer is optional
         auto inlineBuf{request.inputBuf.size() > 1 ? request.inputBuf.at(1) : span<u8>{}};
         auto buf{GetMainIoctlBuffer(ioctl, request.inputBuf.at(0), request.outputBuf.at(0))};
@@ -114,7 +115,7 @@ namespace skyline::service::nvdrv {
         auto fd{request.Pop<FileDescriptor>()};
         auto ioctl{request.Pop<IoctlDescriptor>()};
-        // The inline buffer is technically not required
+        // Inline buffer is optional
         auto inlineBuf{request.outputBuf.size() > 1 ? request.outputBuf.at(1) : span<u8>{}};
         auto buf{GetMainIoctlBuffer(ioctl, request.inputBuf.at(0), request.outputBuf.at(0))};
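On the SessionFdLimit change above: for an unsigned 64-bit type `std::numeric_limits<T>::digits` is 64, so the new expression still evaluates to 128 file descriptors, the same as the old `sizeof(u64) * 2 * 8` spelling. A quick compile-time check of that equivalence (using std::uint64_t in place of skyline's u64 alias):

    #include <cstdint>
    #include <limits>

    static_assert(std::numeric_limits<std::uint64_t>::digits == 64);
    static_assert(std::numeric_limits<std::uint64_t>::digits * 2 == sizeof(std::uint64_t) * 2 * 8,
                  "Both spellings of the two-u64 bitset fd limit evaluate to 128");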

View File

@@ -21,7 +21,7 @@ namespace skyline::service::nvdrv::core {
         if (pAddress)
             flags.keepUncachedAfterFree = false;
         else
-            throw exception("Mapping nvmap handles without a cpu side address is unimplemented!");
+            throw exception("Mapping nvmap handles without a CPU side address is unimplemented!");
         size = util::AlignUp(size, PAGE_SIZE);
         alignedSize = util::AlignUp(size, align);
@@ -53,18 +53,18 @@ namespace skyline::service::nvdrv::core {
     NvMap::NvMap(const DeviceState &state) : state(state) {}
-    void NvMap::AddHandle(std::shared_ptr<Handle> handle) {
+    void NvMap::AddHandle(std::shared_ptr<Handle> handleDesc) {
         std::scoped_lock lock(handlesLock);
-        handles.emplace(handle->id, std::move(handle));
+        handles.emplace(handleDesc->id, std::move(handleDesc));
     }
-    bool NvMap::TryRemoveHandle(const std::shared_ptr<Handle> &h) {
+    bool NvMap::TryRemoveHandle(const std::shared_ptr<Handle> &handleDesc) {
         // No dupes left, we can remove from handle map
-        if (h->dupes == 0 && h->internalDupes == 0) {
+        if (handleDesc->dupes == 0 && handleDesc->internalDupes == 0) {
             std::scoped_lock lock(handlesLock);
-            auto it{handles.find(h->id)};
+            auto it{handles.find(handleDesc->id)};
             if (it != handles.end())
                 handles.erase(it);
@@ -79,10 +79,10 @@ namespace skyline::service::nvdrv::core {
             return PosixResult::InvalidArgument;
         u32 id{nextHandleId.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
-        auto h{std::make_shared<Handle>(size, id)};
-        AddHandle(h);
-        return h;
+        auto handleDesc{std::make_shared<Handle>(size, id)};
+        AddHandle(handleDesc);
+        return handleDesc;
     }
     std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
@@ -100,34 +100,34 @@ namespace skyline::service::nvdrv::core {
         FreeInfo freeInfo;
         // We use a weak ptr here so we can tell when the handle has been freed and report that back to guest
-        if (auto h = hWeak.lock()) {
-            if (!h) [[unlikely]]
+        if (auto handleDesc = hWeak.lock()) {
+            if (!handleDesc) [[unlikely]]
                 return std::nullopt;
-            std::scoped_lock lock(h->mutex);
+            std::scoped_lock lock(handleDesc->mutex);
             if (internalSession) {
-                if (--h->internalDupes < 0)
+                if (--handleDesc->internalDupes < 0)
                     state.logger->Warn("Internal duplicate count inbalance detected!");
             } else {
-                if (--h->dupes < 0) {
+                if (--handleDesc->dupes < 0) {
                     state.logger->Warn("User duplicate count inbalance detected!");
-                } else if (h->dupes == 0) {
+                } else if (handleDesc->dupes == 0) {
                     // TODO: unpin
                 }
             }
             // Try to remove the shared ptr to the handle from the map, if nothing else is using the handle
             // then it will now be freed when `h` goes out of scope
-            if (TryRemoveHandle(h))
+            if (TryRemoveHandle(handleDesc))
                 state.logger->Debug("Removed nvmap handle: {}", handle);
             else
                 state.logger->Debug("Tried to free nvmap handle: {} but didn't as it still has duplicates", handle);
             freeInfo = {
-                .address = h->address,
-                .size = h->size,
-                .wasUncached = h->flags.mapUncached,
+                .address = handleDesc->address,
+                .size = handleDesc->size,
+                .wasUncached = handleDesc->flags.mapUncached,
             };
         } else {
             return std::nullopt;
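The FreeHandle body above relies on the usual `std::weak_ptr` promotion idiom: `hWeak.lock()` yields a `shared_ptr` only while the handle is still alive, the work happens under the handle's own mutex, and dropping the promoted pointer at the end of the scope is what may actually free it. A stripped-down sketch of that idiom (hypothetical Handle stand-in, not the nvmap type):

    #include <memory>
    #include <mutex>
    #include <optional>

    struct Handle {
        std::mutex mutex;
        int dupes{1};
    };

    // Decrements the duplicate count if the handle still exists, otherwise reports that it is gone
    std::optional<int> ReleaseDupe(const std::weak_ptr<Handle> &hWeak) {
        if (auto handleDesc{hWeak.lock()}) { // Promotion fails (returns null) once the handle has been destroyed
            std::scoped_lock lock(handleDesc->mutex);
            return --handleDesc->dupes;
        } // handleDesc going out of scope here may drop the last reference
        return std::nullopt; // Mirrors the early return above
    }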

View File

@@ -89,8 +89,6 @@ namespace skyline::service::nvdrv::core {
         bool TryRemoveHandle(const std::shared_ptr<Handle> &h);
       public:
-        NvMap(const DeviceState &state);
         /**
          * @brief Encapsulates the result of a FreeHandle operation
          */
@@ -100,6 +98,8 @@ namespace skyline::service::nvdrv::core {
             bool wasUncached; //!< If the handle was allocated as uncached
         };
+        NvMap(const DeviceState &state);
         /**
          * @brief Creates an unallocated handle of the given size
          */
@@ -109,7 +109,7 @@ namespace skyline::service::nvdrv::core {
         /**
          * @brief Tries to free a handle and remove a single dupe
-         * If a handle has no dupes left and has no other users a FreeInfo struct will be returned describing the prior state of the handle.
+         * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned describing the prior state of the handle
          */
         std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internalSession);
     };

View File

@@ -73,7 +73,7 @@ namespace skyline::service::nvdrv::core {
         u32 UpdateMin(u32 id);
         /**
-         * @return A fence that will be signalled once this syncpoint hits it's maximum value
+         * @return A fence that will be signalled once this syncpoint hits its maximum value
          */
         Fence GetSyncpointFence(u32 id);
     };

View File

@@ -99,7 +99,7 @@ namespace skyline::service::nvdrv::device::nvhost {
             return PosixResult::InvalidArgument;
         for (const auto &mapping : allocation.mappings)
-            FreeMapping(mapping->offset);
+            FreeMappingLocked(mapping->offset);
         // Unset sparse flag if required
         if (allocation.sparse)
@@ -151,7 +151,8 @@
     }
     PosixResult AsGpu::MapBufferEx(In<MappingFlags> flags, In<u32> kind, In<core::NvMap::Handle::Id> handle, In<u64> bufferOffset, In<u64> mappingSize, InOut<u64> offset) {
-        state.logger->Debug("flags: ( fixed: {}, remap: {} ), kind: {}, handle: {}, bufferOffset: 0x{:X}, mappingSize: 0x{:X}, offset: 0x{:X}", flags.fixed, flags.remap, kind, handle, bufferOffset, mappingSize, offset);
+        state.logger->Debug("flags: ( fixed: {}, remap: {} ), kind: {}, handle: {}, bufferOffset: 0x{:X}, mappingSize: 0x{:X}, offset: 0x{:X}",
+                            flags.fixed, flags.remap, kind, handle, bufferOffset, mappingSize, offset);
         std::scoped_lock lock(mutex);

View File

@@ -35,8 +35,7 @@ namespace skyline::service::nvdrv::device::nvhost {
       private:
         /**
-         * @brief Syncpoint Events are used to expose fences to the userspace, they can be waited on using an IOCTL
-         * or be converted into a native HOS KEvent object that can be waited on just like any other KEvent on the guest
+         * @brief Syncpoint Events are used to expose fences to the userspace, they can be waited on using an IOCTL or be converted into a native HOS KEvent object that can be waited on just like any other KEvent on the guest
          */
         class SyncpointEvent {
           private:
@@ -81,7 +80,7 @@ namespace skyline::service::nvdrv::device::nvhost {
         std::array<std::unique_ptr<SyncpointEvent>, SyncpointEventCount> syncpointEvents{};
         /**
-         * @brief Finds a free syncpoint event for the given syncpoint id
+         * @brief Finds a free syncpoint event for the given syncpoint ID
          * @note syncpointEventMutex MUST be locked when calling this
         * @return The free event slot
         */

View File

@@ -9,30 +9,29 @@ namespace skyline::service::nvdrv::device {
     NvMap::NvMap(const DeviceState &state, Core &core, const SessionContext &ctx) : NvDevice(state, core, ctx) {}
     PosixResult NvMap::Create(In<u32> size, Out<NvMapCore::Handle::Id> handle) {
-        auto h{core.nvMap.CreateHandle(util::AlignUp(size, PAGE_SIZE))};
-        if (h) {
-            (*h)->origSize = size; // Orig size is the unaligned size
-            handle = (*h)->id;
-            state.logger->Debug("handle: {}, size: 0x{:X}", (*h)->id, size);
+        auto handleDesc{core.nvMap.CreateHandle(util::AlignUp(size, PAGE_SIZE))};
+        if (handleDesc) {
+            (*handleDesc)->origSize = size; // Orig size is the unaligned size
+            handle = (*handleDesc)->id;
+            state.logger->Debug("handle: {}, size: 0x{:X}", (*handleDesc)->id, size);
         }
-        return h;
+        return handleDesc;
     }
     PosixResult NvMap::FromId(In<NvMapCore::Handle::Id> id, Out<NvMapCore::Handle::Id> handle) {
         state.logger->Debug("id: {}", id);
         // Handles and IDs are always the same value in nvmap however IDs can be used globally given the right permissions.
-        // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and so this function
-        // just does simple validation and passes through the handle id.
+        // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and so this function just does simple validation and passes through the handle id.
         if (!id) [[unlikely]]
             return PosixResult::InvalidArgument;
-        auto h{core.nvMap.GetHandle(id)};
-        if (!h) [[unlikely]]
+        auto handleDesc{core.nvMap.GetHandle(id)};
+        if (!handleDesc) [[unlikely]]
             return PosixResult::InvalidArgument;
-        return h->Duplicate(ctx.internalSession);
+        return handleDesc->Duplicate(ctx.internalSession);
     }
     PosixResult NvMap::Alloc(In<NvMapCore::Handle::Id> handle, In<u32> heapMask, In<NvMapCore::Handle::Flags> flags, InOut<u32> align, In<u8> kind, In<u64> address) {
@@ -48,11 +47,11 @@ namespace skyline::service::nvdrv::device {
         if (align < PAGE_SIZE) [[unlikely]]
             align = PAGE_SIZE;
-        auto h{core.nvMap.GetHandle(handle)};
-        if (!h) [[unlikely]]
+        auto handleDesc{core.nvMap.GetHandle(handle)};
+        if (!handleDesc) [[unlikely]]
             return PosixResult::InvalidArgument;
-        return h->Alloc(flags, align, kind, address);
+        return handleDesc->Alloc(flags, align, kind, address);
     }
     PosixResult NvMap::Free(In<NvMapCore::Handle::Id> handle, Out<u64> address, Out<u32> size, Out<NvMapCore::Handle::Flags> flags) {
@@ -78,32 +77,32 @@ namespace skyline::service::nvdrv::device {
         if (!handle)
             return PosixResult::InvalidArgument;
-        auto h{core.nvMap.GetHandle(handle)};
-        if (!h) [[unlikely]]
+        auto handleDesc{core.nvMap.GetHandle(handle)};
+        if (!handleDesc) [[unlikely]]
             return PosixResult::InvalidArgument;
         switch (param) {
             case HandleParameterType::Size:
-                result = h->origSize;
+                result = handleDesc->origSize;
                 return PosixResult::Success;
             case HandleParameterType::Alignment:
-                result = h->align;
+                result = handleDesc->align;
                 return PosixResult::Success;
             case HandleParameterType::Base:
                 result = -static_cast<i32>(PosixResult::InvalidArgument);
                 return PosixResult::Success;
             case HandleParameterType::Heap:
-                if (h->allocated)
+                if (handleDesc->allocated)
                     result = 0x40000000;
                 else
                     result = 0;
                 return PosixResult::Success;
             case HandleParameterType::Kind:
-                result = h->kind;
+                result = handleDesc->kind;
                 return PosixResult::Success;
             case HandleParameterType::IsSharedMemMapped:
-                result = h->isSharedMemMapped;
+                result = handleDesc->isSharedMemMapped;
                 return PosixResult::Success;
             default:
                 return PosixResult::InvalidArgument;
@@ -117,11 +116,11 @@ namespace skyline::service::nvdrv::device {
         if (!handle) [[unlikely]]
             return PosixResult::InvalidArgument;
-        auto h{core.nvMap.GetHandle(handle)};
-        if (!h) [[unlikely]]
+        auto handleDesc{core.nvMap.GetHandle(handle)};
+        if (!handleDesc) [[unlikely]]
             return PosixResult::NotPermitted; // This will always return EPERM irrespective of if the handle exists or not
-        id = h->id;
+        id = handleDesc->id;
         return PosixResult::Success;
     }

View File

@@ -39,31 +39,7 @@ namespace skyline::service::nvdrv {
             DEVICE_CASE("/dev/nvhost-ctrl-gpu", nvhost::CtrlGpu)
             DEVICE_CASE("/dev/nvhost-gpu", nvhost::GpuChannel)
         );
-    }/*
-    if (ctx.perms.AccessVic) {
-        switch (pathHash) {
-            ENTRY("/dev/nvhost-vic", nvhost::Vic)
-            default:
-                break;
-        }
-    }
-    if (ctx.perms.AccessVideoDecoder) {
-        switch (pathHash) {
-            ENTRY("/dev/nvhost-nvdec", nvhost::NvDec)
-            default:
-                break;
-        }
-    }
-    if (ctx.perms.AccessJpeg) {
-        switch (pathHash) {
-            ENTRY("/dev/nvhost-nvjpg", nvhost::NvJpg)
-            default:
-                break;
-        }
-    }*/
+    }
 #undef DEVICE_CASE
 #undef DEVICE_SWITCH
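For context on the dispatch that the removed block (and the remaining DEVICE_CASE entries) plugs into: switching on `pathHash` implies the device path is reduced to an integer with a compile-time string hash so each case label can be a constant. A hypothetical illustration of that technique, using FNV-1a rather than whatever hash the DEVICE_SWITCH/DEVICE_CASE macros actually use:

    #include <cstdint>
    #include <string_view>

    // Compile-time FNV-1a; collisions are possible in principle, so real code usually also compares the string
    constexpr std::uint64_t Fnv1a(std::string_view str) {
        std::uint64_t hash{14695981039346656037ull};
        for (char c : str) {
            hash ^= static_cast<std::uint8_t>(c);
            hash *= 1099511628211ull;
        }
        return hash;
    }

    const char *Describe(std::string_view path) {
        switch (Fnv1a(path)) {
            case Fnv1a("/dev/nvhost-ctrl-gpu"):
                return "GPU control channel";
            case Fnv1a("/dev/nvhost-gpu"):
                return "GPU channel";
            default:
                return "unknown device";
        }
    }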

View File

@@ -9,9 +9,6 @@ namespace skyline::service::nvdrv {
     using FileDescriptor = i32;
     constexpr FileDescriptor InvalidFileDescriptor{-1};
-    /**
-     * @brief Holds the permissions for an nvdrv instance
-     */
     struct SessionPermissions {
         bool AccessGpu;
         bool AccessGpuDebug;
@@ -29,9 +26,6 @@ namespace skyline::service::nvdrv {
         bool ExportNvMapHandles;
     };
-    /**
-     * @brief Holds the per-session context for nvdrv
-     */
     struct SessionContext {
         SessionPermissions perms;
         bool internalSession;