Mirror of https://github.com/skyline-emu/skyline.git (synced 2024-12-29 15:15:30 +03:00)
Rework VMM + Adapt KMemory Objects to be in-process
Note: This commit isn't functional on its own; it requires the rest of NCE3 to work.
Parent: 7b13f2d387 · Commit: 60e82e6af0
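The thrust of the change is visible in the KPrivateMemory hunks near the end of the diff: the old code allocated an ASharedMemory fd and had the guest child process map it through a remote syscall (state.nce->ExecuteFunction with __NR_mmap), while the reworked code maps memory directly in the emulator's own address space. A minimal sketch of that in-process idea, assuming an anonymous private mapping (the helper name and the MAP_ANONYMOUS | MAP_PRIVATE flags are illustrative, not taken from this commit):

    // Illustrative only: back a guest allocation with a plain in-process mmap
    #include <sys/mman.h>
    #include <cerrno>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <stdexcept>
    #include <string>

    uint8_t *MapGuestBacking(uint8_t *hint, size_t size) {
        // MAP_FIXED is only requested when the caller supplies a target address,
        // mirroring what the reworked KPrivateMemory constructor does with 'ptr'
        auto flags{MAP_ANONYMOUS | MAP_PRIVATE | (hint ? MAP_FIXED : 0)};
        auto result{mmap(hint, size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0)};
        if (result == MAP_FAILED)
            throw std::runtime_error(std::string{"mmap failed: "} + std::strerror(errno));
        return static_cast<uint8_t *>(result);
    }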
@@ -9,6 +9,7 @@
 #include <vector>
 #include <fstream>
 #include <mutex>
+#include <shared_mutex>
 #include <functional>
 #include <thread>
 #include <string>
@@ -104,6 +105,19 @@ namespace skyline {
             return ticks;
         }
 
+        /**
+         * @brief A way to implicitly convert a pointer to size_t and leave it unaffected if it isn't a pointer
+         */
+        template<class T>
+        T PointerValue(T item) {
+            return item;
+        };
+
+        template<class T>
+        size_t PointerValue(T *item) {
+            return reinterpret_cast<size_t>(item);
+        };
+
         /**
          * @return The value aligned up to the next multiple
          * @note The multiple needs to be a power of 2
@@ -111,7 +125,7 @@ namespace skyline {
         template<typename TypeVal, typename TypeMul>
         constexpr TypeVal AlignUp(TypeVal value, TypeMul multiple) {
             multiple--;
-            return (value + multiple) & ~(multiple);
+            return (PointerValue(value) + multiple) & ~(multiple);
         }
 
         /**
@@ -120,7 +134,7 @@ namespace skyline {
         */
         template<typename TypeVal, typename TypeMul>
         constexpr TypeVal AlignDown(TypeVal value, TypeMul multiple) {
-            return value & ~(multiple - 1);
+            return PointerValue(value) & ~(multiple - 1);
         }
 
         /**
@@ -129,22 +143,24 @@ namespace skyline {
         template<typename TypeVal, typename TypeMul>
         constexpr bool IsAligned(TypeVal value, TypeMul multiple) {
             if ((multiple & (multiple - 1)) == 0)
-                return !(value & (multiple - 1U));
+                return !(PointerValue(value) & (multiple - 1U));
             else
-                return (value % multiple) == 0;
+                return (PointerValue(value) % multiple) == 0;
         }
 
         /**
          * @return If the value is page aligned
         */
-        constexpr bool PageAligned(u64 value) {
+        template<typename TypeVal>
+        constexpr bool PageAligned(TypeVal value) {
             return IsAligned(value, PAGE_SIZE);
         }
 
         /**
          * @return If the value is word aligned
         */
-        constexpr bool WordAligned(u64 value) {
+        template<typename TypeVal>
+        constexpr bool WordAligned(TypeVal value) {
             return IsAligned(value, WORD_BIT / 8);
         }
 
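The PointerValue overloads added above let the alignment helpers accept either integral addresses or raw pointers, which the SVC handlers further down rely on once guest addresses become u8 * values. A standalone sketch of the dispatch (PointerValue is marked constexpr here purely so the example can be checked at compile time, and PAGE_SIZE is assumed to be the usual 0x1000-byte page; only the power-of-two fast path is reproduced):

    #include <cstddef>
    #include <cstdint>

    template<class T>
    constexpr T PointerValue(T item) {
        return item; // integral values pass through unchanged
    }

    template<class T>
    size_t PointerValue(T *item) {
        return reinterpret_cast<size_t>(item); // pointers decay to their numeric address
    }

    template<typename TypeVal, typename TypeMul>
    constexpr bool IsAligned(TypeVal value, TypeMul multiple) {
        return !(PointerValue(value) & (multiple - 1U)); // power-of-two fast path only
    }

    constexpr size_t PageSize{0x1000}; // assumption: the value PAGE_SIZE has elsewhere in the tree

    static_assert(IsAligned(size_t{0x2000}, PageSize), "integral overload");

    bool IsPageAligned(uint8_t *ptr) {
        return IsAligned(ptr, PageSize); // pointer overload is selected automatically
    }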
@@ -37,7 +37,6 @@ namespace skyline::gpu::vmm {
             *chunk = newChunk;
         } else {
             chunk->size = newSize;
-
             chunk = chunks.insert(std::next(chunk), newChunk);
         }
 
@@ -68,7 +67,6 @@ namespace skyline::gpu::vmm {
         if (tailChunk->state == ChunkState::Mapped)
             tailChunk->cpuAddress += chunkSliceOffset;
 
-
         // If the size of the head chunk is zero then we can directly replace it with our new one rather than inserting it
         auto headChunk{std::prev(tailChunk)};
         if (headChunk->size == 0)
@@ -4,5 +4,5 @@
 #include "input.h"
 
 namespace skyline::input {
-    Input::Input(const DeviceState &state) : state(state), kHid(std::make_shared<kernel::type::KSharedMemory>(state, NULL, sizeof(HidSharedMemory), memory::Permission(true, false, false))), hid(reinterpret_cast<HidSharedMemory *>(kHid->kernel.address)), npad(state, hid), touch(state, hid) {}
+    Input::Input(const DeviceState &state) : state(state), kHid(std::make_shared<kernel::type::KSharedMemory>(state, sizeof(HidSharedMemory))), hid(reinterpret_cast<HidSharedMemory *>(kHid->kernel.ptr)), npad(state, hid), touch(state, hid) {}
 }
@@ -6,7 +6,7 @@
 
 namespace skyline::kernel::ipc {
     IpcRequest::IpcRequest(bool isDomain, const DeviceState &state) : isDomain(isDomain) {
-        u8 *tls{state.process->GetPointer<u8>(state.thread->tls)};
+        auto tls{state.thread->tls};
         u8 *pointer{tls};
 
         header = reinterpret_cast<CommandHeader *>(pointer);
@@ -129,7 +129,7 @@ namespace skyline::kernel::ipc {
     IpcResponse::IpcResponse(const DeviceState &state) : state(state) {}
 
     void IpcResponse::WriteResponse(bool isDomain) {
-        auto tls{state.process->GetPointer<u8>(state.thread->tls)};
+        auto tls{state.thread->tls};
         u8 *pointer{tls};
 
         memset(tls, 0, constant::TlsIpcSize);
@@ -5,118 +5,7 @@
 #include "types/KProcess.h"
 
 namespace skyline::kernel {
-    ChunkDescriptor *MemoryManager::GetChunk(u64 address) {
-        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
-        })};
-
-        if (chunk-- != chunks.begin()) {
-            if ((chunk->address + chunk->size) > address)
-                return chunk.base();
-        }
-
-        return nullptr;
-    }
-
-    BlockDescriptor *MemoryManager::GetBlock(u64 address, ChunkDescriptor *chunk) {
-        if (!chunk)
-            chunk = GetChunk(address);
-
-        if (chunk) {
-            auto block{std::upper_bound(chunk->blockList.begin(), chunk->blockList.end(), address, [](const u64 address, const BlockDescriptor &block) -> bool {
-                return address < block.address;
-            })};
-
-            if (block-- != chunk->blockList.begin()) {
-                if ((block->address + block->size) > address)
-                    return block.base();
-            }
-        }
-
-        return nullptr;
-    }
-
-    void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
-        auto upperChunk{std::upper_bound(chunks.begin(), chunks.end(), chunk.address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-            return address < chunk.address;
-        })};
-
-        if (upperChunk != chunks.begin()) {
-            auto lowerChunk{std::prev(upperChunk)};
-
-            if (lowerChunk->address + lowerChunk->size > chunk.address)
-                throw exception("InsertChunk: Descriptors are colliding: 0x{:X} - 0x{:X} and 0x{:X} - 0x{:X}", lowerChunk->address, lowerChunk->address + lowerChunk->size, chunk.address, chunk.address + chunk.size);
-        }
-
-        chunks.insert(upperChunk, chunk);
-    }
-
-    void MemoryManager::DeleteChunk(u64 address) {
-        for (auto chunk{chunks.begin()}, end{chunks.end()}; chunk != end;) {
-            if (chunk->address <= address && (chunk->address + chunk->size) > address)
-                chunk = chunks.erase(chunk);
-            else
-                chunk++;
-        }
-    }
-
-    void MemoryManager::ResizeChunk(ChunkDescriptor *chunk, size_t size) {
-        if (chunk->blockList.size() == 1) {
-            chunk->blockList.begin()->size = size;
-        } else if (size > chunk->size) {
-            auto begin{chunk->blockList.begin()};
-            auto end{std::prev(chunk->blockList.end())};
-
-            BlockDescriptor block{
-                .address = (end->address + end->size),
-                .size = (chunk->address + size) - (end->address + end->size),
-                .permission = begin->permission,
-                .attributes = begin->attributes,
-            };
-
-            chunk->blockList.push_back(block);
-        } else if (size < chunk->size) {
-            auto endAddress{chunk->address + size};
-
-            for (auto block{chunk->blockList.begin()}, end{chunk->blockList.end()}; block != end;) {
-                if (block->address > endAddress)
-                    block = chunk->blockList.erase(block);
-                else
-                    block++;
-            }
-
-            auto end{std::prev(chunk->blockList.end())};
-            end->size = endAddress - end->address;
-        }
-
-        chunk->size = size;
-    }
-
-    void MemoryManager::InsertBlock(ChunkDescriptor *chunk, BlockDescriptor block) {
-        if (chunk->address + chunk->size < block.address + block.size)
-            throw exception("InsertBlock: Inserting block past chunk end is not allowed");
-
-        for (auto iter{chunk->blockList.begin()}; iter != chunk->blockList.end(); iter++) {
-            if (iter->address <= block.address) {
-                if ((iter->address + iter->size) > block.address) {
-                    if (iter->address == block.address && iter->size == block.size) {
-                        iter->attributes = block.attributes;
-                        iter->permission = block.permission;
-                    } else {
-                        auto endBlock{*iter};
-                        endBlock.address = (block.address + block.size);
-                        endBlock.size = (iter->address + iter->size) - endBlock.address;
-
-                        iter->size = block.address - iter->address;
-                        chunk->blockList.insert(std::next(iter), {block, endBlock});
-                    }
-                    return;
-                }
-            }
-        }
-
-        throw exception("InsertBlock: Block offset not present within current block list");
-    }
+    MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}
 
     void MemoryManager::InitializeRegions(u64 address, u64 size, memory::AddressSpaceType type) {
         switch (type) {
@@ -162,55 +51,56 @@ namespace skyline::kernel {
             }
         }
 
+        chunks = {ChunkDescriptor{
+            .ptr = reinterpret_cast<u8*>(base.address),
+            .size = base.size,
+            .state = memory::states::Unmapped,
+        }};
 
         state.logger->Debug("Region Map:\nCode Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nAlias Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nHeap Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nStack Region: 0x{:X} - 0x{:X} (Size: 0x{:X})\nTLS/IO Region: 0x{:X} - 0x{:X} (Size: 0x{:X})", code.address, code.address + code.size, code.size, alias.address, alias.address + alias.size, alias.size, heap.address, heap
             .address + heap.size, heap.size, stack.address, stack.address + stack.size, stack.size, tlsIo.address, tlsIo.address + tlsIo.size, tlsIo.size);
     }
 
-    MemoryManager::MemoryManager(const DeviceState &state) : state(state) {}
-
-    std::optional<DescriptorPack> MemoryManager::Get(u64 address, bool requireMapped) {
-        auto chunk{GetChunk(address)};
-
-        if (chunk)
-            return DescriptorPack{*GetBlock(address, chunk), *chunk};
-
-        // If the requested address is in the address space but no chunks are present then we return a new unmapped region
-        if (addressSpace.IsInside(address) && !requireMapped) {
-            auto upperChunk{std::upper_bound(chunks.begin(), chunks.end(), address, [](const u64 address, const ChunkDescriptor &chunk) -> bool {
-                return address < chunk.address;
-            })};
-
-            u64 upperAddress{};
-            u64 lowerAddress{};
-
-            if (upperChunk != chunks.end()) {
-                upperAddress = upperChunk->address;
-
-                if (upperChunk == chunks.begin()) {
-                    lowerAddress = addressSpace.address;
-                } else {
-                    upperChunk--;
-                    lowerAddress = upperChunk->address + upperChunk->size;
-                }
-            } else {
-                upperAddress = addressSpace.address + addressSpace.size;
-                lowerAddress = chunks.back().address + chunks.back().size;
-            }
-
-            u64 size{upperAddress - lowerAddress};
-
-            return DescriptorPack{
-                .chunk = {
-                    .address = lowerAddress,
-                    .size = size,
-                    .state = memory::states::Unmapped
-                },
-                .block = {
-                    .address = lowerAddress,
-                    .size = size,
-                }
-            };
-        }
-
+    void MemoryManager::InsertChunk(const ChunkDescriptor &chunk) {
+        std::unique_lock lock(mutex);
+
+        auto upper{std::upper_bound(chunks.begin(), chunks.end(), chunk.ptr, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
+        if (upper == chunks.begin())
+            throw exception("InsertChunk: Chunk inserted outside address space: 0x{:X} - 0x{:X} and 0x{:X} - 0x{:X}", fmt::ptr(upper->ptr), fmt::ptr(upper->ptr + upper->size), chunk.ptr, fmt::ptr(chunk.ptr + chunk.size));
+
+        upper = chunks.erase(upper, std::upper_bound(upper, chunks.end(), chunk.ptr + chunk.size, [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; }));
+        if (upper != chunks.end() && upper->ptr < chunk.ptr + chunk.size) {
+            auto end{upper->ptr + upper->size};
+            upper->ptr = chunk.ptr + chunk.size;
+            upper->size = end - upper->ptr;
+        }
+
+        auto lower{std::prev(upper)};
+        if (lower->ptr == chunk.ptr && lower->size == chunk.size) {
+            lower->state = chunk.state;
+            lower->permission = chunk.permission;
+            lower->attributes = chunk.attributes;
+        } else if (chunk.IsCompatible(*lower)) {
+            lower->size = lower->size + chunk.size;
+        } else {
+            if (lower->ptr + lower->size > chunk.ptr)
+                lower->size = chunk.ptr - lower->ptr;
+            if (upper != chunks.end() && chunk.IsCompatible(*upper)) {
+                upper->ptr = chunk.ptr;
+                upper->size = chunk.size + upper->size;
+            } else {
+                chunks.insert(upper, chunk);
+            }
+        }
+    }
+
+    std::optional<ChunkDescriptor> MemoryManager::Get(void* ptr) {
+        std::shared_lock lock(mutex);
+
+        auto chunk{std::upper_bound(chunks.begin(), chunks.end(), reinterpret_cast<u8 *>(ptr), [](const u8 *ptr, const ChunkDescriptor &chunk) -> bool { return ptr < chunk.ptr; })};
+        if (chunk-- != chunks.begin())
+            if ((chunk->ptr + chunk->size) > ptr)
+                return std::make_optional(*chunk);
+
         return std::nullopt;
     }
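The rewritten InsertChunk above keeps `chunks` as an address-ordered vector of non-overlapping descriptors: an insert erases whatever the new chunk fully covers, trims a partially overlapped upper neighbour, and merges with a compatible lower or upper neighbour instead of growing the list. The SVC handlers later in this diff drive it with a read-modify-reinsert pattern; roughly (illustrative fragment mirroring the svcSetMemoryAttribute hunk further down):

    // Read the descriptor covering 'ptr', copy it, adjust it, and re-insert it;
    // InsertChunk takes care of splitting and re-merging neighbouring chunks
    auto chunk{state.os->memory.Get(ptr)};        // std::optional<ChunkDescriptor>
    if (chunk) {
        auto newChunk{*chunk};
        newChunk.ptr = ptr;
        newChunk.size = size;
        newChunk.attributes.isUncached = true;    // example attribute change
        state.os->memory.InsertChunk(newChunk);
    }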
@@ -7,12 +7,9 @@
 
 namespace skyline {
     namespace memory {
-        /**
-         * @brief The Permission struct holds the permission of a particular chunk of memory
-         */
         struct Permission {
             /**
-             * @brief This constructor initializes all permissions to false
+             * @brief Initializes all permissions to false
             */
            constexpr Permission() : r(), w(), x() {}
 
@@ -23,14 +20,8 @@ namespace skyline {
            */
            constexpr Permission(bool read, bool write, bool execute) : r(read), w(write), x(execute) {}
 
-            /**
-             * @brief Equality operator between two Permission objects
-             */
            inline bool operator==(const Permission &rhs) const { return (this->r == rhs.r && this->w == rhs.w && this->x == rhs.x); }
 
-            /**
-             * @brief Inequality operator between two Permission objects
-             */
            inline bool operator!=(const Permission &rhs) const { return !operator==(rhs); }
 
            /**
@@ -110,7 +101,6 @@ namespace skyline {
        };
 
        /**
-         * @brief The state of a certain block of memory
         * @url https://switchbrew.org/wiki/SVC#MemoryState
        */
        union MemoryState {
@@ -119,7 +109,7 @@ namespace skyline {
            constexpr MemoryState() : value(0) {}
 
            struct {
-                MemoryType type; //!< The MemoryType of this memory block
+                MemoryType type;
                bool permissionChangeAllowed : 1; //!< If the application can use svcSetMemoryPermission on this block
                bool forceReadWritableByDebugSyscalls : 1; //!< If the application can use svcWriteDebugProcessMemory on this block
                bool ipcSendAllowed : 1; //!< If this block is allowed to be sent as an IPC buffer with flags=0
@@ -172,22 +162,14 @@ namespace skyline {
        };
 
        struct Region {
-            u64 address; //!< The base address of the region
-            u64 size; //!< The size of the region in bytes
+            u64 address;
+            size_t size;
 
-            /**
-             * @brief Checks if the specified address is within the region
-             * @param address The address to check
-             * @return If the address is inside the region
-             */
-            inline bool IsInside(u64 address) {
-                return (this->address <= address) && ((this->address + this->size) > address);
+            bool IsInside(void* ptr) {
+                return (address <= reinterpret_cast<u64>(ptr)) && ((address + size) > reinterpret_cast<u64>(ptr));
            }
        };
 
-        /**
-         * @brief The type of the address space used by an application
-         */
        enum class AddressSpaceType {
            AddressSpace32Bit, //!< 32-bit address space used by 32-bit applications
            AddressSpace36Bit, //!< 36-bit address space used by 64-bit applications before 2.0.0
@@ -202,130 +184,49 @@ namespace skyline {
     }
 
     namespace kernel {
-        namespace type {
-            class KPrivateMemory;
-            class KSharedMemory;
-            class KTransferMemory;
-        }
-
-        namespace svc {
-            void SetMemoryAttribute(DeviceState &state);
-
-            void MapMemory(DeviceState &state);
-        }
-
-        /**
-         * @brief A single block of memory and all of it's individual attributes
-         */
-        struct BlockDescriptor {
-            u64 address; //!< The address of the current block
-            u64 size; //!< The size of the current block in bytes
-            memory::Permission permission; //!< The permissions applied to the current block
-            memory::MemoryAttribute attributes; //!< The MemoryAttribute for the current block
-        };
-
-        /**
-         * @brief A single chunk of memory, this is owned by a memory backing
-         */
         struct ChunkDescriptor {
-            u64 address; //!< The address of the current chunk
-            u64 size; //!< The size of the current chunk in bytes
-            u64 host; //!< The address of the chunk in the host
-            memory::MemoryState state; //!< The MemoryState for the current block
-            std::vector<BlockDescriptor> blockList; //!< The block descriptors for all the children blocks of this Chunk
+            u8* ptr;
+            size_t size;
+            memory::Permission permission;
+            memory::MemoryState state;
+            memory::MemoryAttribute attributes;
+
+            constexpr bool IsCompatible(const ChunkDescriptor& chunk) const {
+                return chunk.permission == permission && chunk.state.value == state.value && chunk.attributes.value == attributes.value;
+            }
         };
 
         /**
-         * @brief A pack of both the descriptors for a specific address
-         */
-        struct DescriptorPack {
-            const BlockDescriptor block; //!< The block descriptor at the address
-            const ChunkDescriptor chunk; //!< The chunk descriptor at the address
-        };
-
-        /**
-         * @brief The MemoryManager class handles the memory map and the memory regions of the process
+         * @brief MemoryManager keeps track of guest virtual memory and it's related attributes
         */
        class MemoryManager {
          private:
            const DeviceState &state;
            std::vector<ChunkDescriptor> chunks;
 
-            /**
-             * @param address The address to find a chunk at
-             * @return A pointer to the ChunkDescriptor or nullptr in case chunk was not found
-             */
-            ChunkDescriptor *GetChunk(u64 address);
-
-            /**
-             * @param address The address to find a block at
-             * @return A pointer to the BlockDescriptor or nullptr in case chunk was not found
-             */
-            BlockDescriptor *GetBlock(u64 address, ChunkDescriptor *chunk = nullptr);
-
-            /**
-             * @brief Inserts a chunk into the memory map
-             * @param chunk The chunk to insert
-             */
-            void InsertChunk(const ChunkDescriptor &chunk);
-
-            /**
-             * @brief Deletes a chunk located at the address from the memory map
-             * @param address The address of the chunk to delete
-             */
-            void DeleteChunk(u64 address);
-
-            /**
-             * @brief Resize the specified chunk to the specified size
-             * @param chunk The chunk to resize
-             * @param size The new size of the chunk
-             */
-            static void ResizeChunk(ChunkDescriptor *chunk, size_t size);
-
-            /**
-             * @brief Insert a block into a chunk
-             * @param chunk The chunk to insert the block into
-             * @param block The block to insert into the chunk
-             */
-            static void InsertBlock(ChunkDescriptor *chunk, BlockDescriptor block);
+          public:
+            memory::Region addressSpace{}; //!< The entire address space
+            memory::Region base{}; //!< The application-accessible address space
+            memory::Region code{};
+            memory::Region alias{};
+            memory::Region heap{};
+            memory::Region stack{};
+            memory::Region tlsIo{}; //!< TLS/IO
+
+            std::shared_mutex mutex; //!< Synchronizes any operations done on the VMM, it is locked in shared mode by readers and exclusive mode by writers
+
+            MemoryManager(const DeviceState &state);
 
            /**
             * @brief Initializes all of the regions in the address space
             * @param address The starting address of the code region
             * @param size The size of the code region
-             * @param type The type of the address space
             */
            void InitializeRegions(u64 address, u64 size, memory::AddressSpaceType type);
 
-          public:
-            friend class type::KPrivateMemory;
-            friend class type::KSharedMemory;
-            friend class type::KTransferMemory;
-            friend class type::KProcess;
-            friend class loader::NroLoader;
-            friend class loader::NsoLoader;
-            friend class loader::NcaLoader;
-
-            friend void svc::SetMemoryAttribute(DeviceState &state);
-            friend void svc::MapMemory(skyline::DeviceState &state);
-
-            memory::Region addressSpace{}; //!< The Region object for the entire address space
-            memory::Region base{}; //!< The Region object for the entire address space accessible to the application
-            memory::Region code{}; //!< The Region object for the code memory region
-            memory::Region alias{}; //!< The Region object for the alias memory region
-            memory::Region heap{}; //!< The Region object for the heap memory region
-            memory::Region stack{}; //!< The Region object for the stack memory region
-            memory::Region tlsIo{}; //!< The Region object for the TLS/IO memory region
-
-            MemoryManager(const DeviceState &state);
-
-            /**
-             * @param address The address to query in the memory map
-             * @param requireMapped If only mapped regions should be returned otherwise unmapped but valid regions will also be returned
-             * @return A DescriptorPack retrieved from the memory map
-             */
-            std::optional<DescriptorPack> Get(u64 address, bool requireMapped = true);
+            void InsertChunk(const ChunkDescriptor &chunk);
+
+            std::optional<ChunkDescriptor> Get(void* ptr);
 
            /**
             * @brief The total amount of space in bytes occupied by all memory mappings
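The new MemoryManager pairs its chunk vector with a std::shared_mutex: Get() takes a std::shared_lock so lookups can run concurrently, while InsertChunk() takes a std::unique_lock for exclusive access. The same discipline in isolation (a minimal sketch, not the emulator's own types):

    #include <mutex>
    #include <optional>
    #include <shared_mutex>
    #include <vector>

    struct Tracker {
        std::shared_mutex mutex;
        std::vector<int> items;

        std::optional<int> Get(size_t index) {
            std::shared_lock lock(mutex);   // readers share the lock
            if (index < items.size())
                return items[index];
            return std::nullopt;
        }

        void Insert(int value) {
            std::unique_lock lock(mutex);   // writers are exclusive
            items.push_back(value);
        }
    };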
@@ -22,16 +22,16 @@ namespace skyline::kernel::svc {
         heap->Resize(size);
 
         state.ctx->registers.w0 = Result{};
-        state.ctx->registers.x1 = heap->address;
+        state.ctx->registers.x1 = reinterpret_cast<u64>(heap->ptr);
 
-        state.logger->Debug("svcSetHeapSize: Allocated at 0x{:X} for 0x{:X} bytes", heap->address, heap->size);
+        state.logger->Debug("svcSetHeapSize: Allocated at 0x{:X} for 0x{:X} bytes", fmt::ptr(heap->ptr), heap->size);
     }
 
     void SetMemoryAttribute(DeviceState &state) {
-        auto address{state.ctx->registers.x0};
-        if (!util::PageAligned(address)) {
+        auto ptr{reinterpret_cast<u8 *>(state.ctx->registers.x0)};
+        if (!util::PageAligned(ptr)) {
             state.ctx->registers.w0 = result::InvalidAddress;
-            state.logger->Warn("svcSetMemoryAttribute: 'address' not page aligned: 0x{:X}", address);
+            state.logger->Warn("svcSetMemoryAttribute: 'address' not page aligned: 0x{:X}", fmt::ptr(ptr));
             return;
         }
 
@@ -52,35 +52,37 @@ namespace skyline::kernel::svc {
             return;
         }
 
-        auto chunk{state.os->memory.GetChunk(address)};
-        auto block{state.os->memory.GetBlock(address)};
-        if (!chunk || !block) {
+        auto chunk{state.os->memory.Get(ptr)};
+        if (!chunk) {
             state.ctx->registers.w0 = result::InvalidAddress;
-            state.logger->Warn("svcSetMemoryAttribute: Cannot find memory region: 0x{:X}", address);
+            state.logger->Warn("svcSetMemoryAttribute: Cannot find memory region: 0x{:X}", fmt::ptr(ptr));
             return;
         }
 
         if (!chunk->state.attributeChangeAllowed) {
             state.ctx->registers.w0 = result::InvalidState;
-            state.logger->Warn("svcSetMemoryAttribute: Attribute change not allowed for chunk: 0x{:X}", address);
+            state.logger->Warn("svcSetMemoryAttribute: Attribute change not allowed for chunk: 0x{:X}", fmt::ptr(ptr));
             return;
         }
 
-        block->attributes.isUncached = value.isUncached;
-        MemoryManager::InsertBlock(chunk, *block);
+        auto newChunk{*chunk};
+        newChunk.ptr = ptr;
+        newChunk.size = size;
+        newChunk.attributes.isUncached = value.isUncached;
+        state.os->memory.InsertChunk(newChunk);
 
-        state.logger->Debug("svcSetMemoryAttribute: Set caching to {} at 0x{:X} for 0x{:X} bytes", !block->attributes.isUncached, address, size);
+        state.logger->Debug("svcSetMemoryAttribute: Set caching to {} at 0x{:X} for 0x{:X} bytes", bool(value.isUncached), fmt::ptr(ptr), size);
         state.ctx->registers.w0 = Result{};
     }
 
     void MapMemory(DeviceState &state) {
-        auto destination{state.ctx->registers.x0};
-        auto source{state.ctx->registers.x1};
+        auto destination{reinterpret_cast<u8*>(state.ctx->registers.x0)};
+        auto source{reinterpret_cast<u8*>(state.ctx->registers.x1)};
         auto size{state.ctx->registers.x2};
 
         if (!util::PageAligned(destination) || !util::PageAligned(source)) {
             state.ctx->registers.w0 = result::InvalidAddress;
-            state.logger->Warn("svcMapMemory: Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
+            state.logger->Warn("svcMapMemory: Addresses not page aligned: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", fmt::ptr(source), fmt::ptr(destination), size);
             return;
         }
 
@@ -93,37 +95,37 @@ namespace skyline::kernel::svc {
         auto stack{state.os->memory.stack};
         if (!stack.IsInside(destination)) {
             state.ctx->registers.w0 = result::InvalidMemoryRegion;
-            state.logger->Warn("svcMapMemory: Destination not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
+            state.logger->Warn("svcMapMemory: Destination not within stack region: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", fmt::ptr(source), fmt::ptr(destination), size);
             return;
         }
 
-        auto descriptor{state.os->memory.Get(source)};
-        if (!descriptor) {
+        auto chunk{state.os->memory.Get(source)};
+        if (!chunk) {
             state.ctx->registers.w0 = result::InvalidAddress;
-            state.logger->Warn("svcMapMemory: Source has no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
+            state.logger->Warn("svcMapMemory: Source has no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", fmt::ptr(source), fmt::ptr(destination), size);
             return;
         }
-        if (!descriptor->chunk.state.mapAllowed) {
+        if (!chunk->state.mapAllowed) {
             state.ctx->registers.w0 = result::InvalidState;
-            state.logger->Warn("svcMapMemory: Source doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes) 0x{:X}", source, destination, size, descriptor->chunk.state.value);
+            state.logger->Warn("svcMapMemory: Source doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X}, Size: 0x{:X}, MemoryState: 0x{:X}", fmt::ptr(source), fmt::ptr(destination), size, chunk->state.value);
             return;
         }
 
-        state.process->NewHandle<type::KPrivateMemory>(destination, size, descriptor->block.permission, memory::states::Stack);
-        state.process->CopyMemory(source, destination, size);
+        state.process->NewHandle<type::KPrivateMemory>(destination, size, chunk->permission, memory::states::Stack);
+        memcpy(destination, source, size);
 
         auto object{state.process->GetMemoryObject(source)};
         if (!object)
-            throw exception("svcMapMemory: Cannot find memory object in handle table for address 0x{:X}", source);
+            throw exception("svcMapMemory: Cannot find memory object in handle table for address 0x{:X}", fmt::ptr(source));
         object->item->UpdatePermission(source, size, {false, false, false});
 
-        state.logger->Debug("svcMapMemory: Mapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", source, source + size, destination, destination + size, size);
+        state.logger->Debug("svcMapMemory: Mapped range 0x{:X} - 0x{:X} to 0x{:X} - 0x{:X} (Size: 0x{:X} bytes)", fmt::ptr(source), fmt::ptr(source + size), fmt::ptr(destination), fmt::ptr(destination + size), size);
         state.ctx->registers.w0 = Result{};
     }
 
     void UnmapMemory(DeviceState &state) {
-        auto source{state.ctx->registers.x0};
-        auto destination{state.ctx->registers.x1};
+        auto source{reinterpret_cast<u8*>(state.ctx->registers.x0)};
+        auto destination{reinterpret_cast<u8*>(state.ctx->registers.x1)};
         auto size{state.ctx->registers.x2};
 
         if (!util::PageAligned(destination) || !util::PageAligned(source)) {
@@ -145,17 +147,17 @@ namespace skyline::kernel::svc {
             return;
         }
 
-        auto sourceDesc{state.os->memory.Get(source)};
-        auto destDesc{state.os->memory.Get(destination)};
-        if (!sourceDesc || !destDesc) {
+        auto sourceChunk{state.os->memory.Get(source)};
+        auto destChunk{state.os->memory.Get(destination)};
+        if (!sourceChunk || !destChunk) {
             state.ctx->registers.w0 = result::InvalidAddress;
             state.logger->Warn("svcUnmapMemory: Addresses have no descriptor: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes)", source, destination, size);
             return;
         }
 
-        if (!destDesc->chunk.state.mapAllowed) {
+        if (!destChunk->state.mapAllowed) {
             state.ctx->registers.w0 = result::InvalidState;
-            state.logger->Warn("svcUnmapMemory: Destination doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes) 0x{:X}", source, destination, size, destDesc->chunk.state.value);
+            state.logger->Warn("svcUnmapMemory: Destination doesn't allow usage of svcMapMemory: Source: 0x{:X}, Destination: 0x{:X} (Size: 0x{:X} bytes) 0x{:X}", source, destination, size, destChunk->state.value);
             return;
         }
 
@@ -163,9 +165,9 @@ namespace skyline::kernel::svc {
         if (!destObject)
             throw exception("svcUnmapMemory: Cannot find destination memory object in handle table for address 0x{:X}", destination);
 
-        destObject->item->UpdatePermission(destination, size, sourceDesc->block.permission);
+        destObject->item->UpdatePermission(destination, size, sourceChunk->permission);
 
-        state.process->CopyMemory(destination, source, size);
+        std::memcpy(source, destination, size);
 
         auto sourceObject{state.process->GetMemoryObject(destination)};
         if (!sourceObject)
@@ -180,23 +182,23 @@ namespace skyline::kernel::svc {
     void QueryMemory(DeviceState &state) {
         memory::MemoryInfo memInfo{};
 
-        auto address{state.ctx->registers.x2};
-        auto descriptor{state.os->memory.Get(address, false)};
+        auto ptr{reinterpret_cast<u8*>(state.ctx->registers.x2)};
+        auto chunk{state.os->memory.Get(ptr)};
 
-        if (descriptor) {
+        if (chunk) {
             memInfo = {
-                .address = descriptor->block.address,
-                .size = descriptor->block.size,
-                .type = static_cast<u32>(descriptor->chunk.state.type),
-                .attributes = descriptor->block.attributes.value,
-                .permissions = static_cast<u32>(descriptor->block.permission.Get()),
+                .address = reinterpret_cast<u64>(chunk->ptr),
+                .size = chunk->size,
+                .type = static_cast<u32>(chunk->state.type),
+                .attributes = chunk->attributes.value,
+                .permissions = static_cast<u32>(chunk->permission.Get()),
                 .deviceRefCount = 0,
                 .ipcRefCount = 0,
             };
 
-            state.logger->Debug("svcQueryMemory: Address: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Is Uncached: {}, Permissions: {}{}{}", memInfo.address, memInfo.size, memInfo.type, static_cast<bool>(descriptor->block.attributes.isUncached), descriptor->block.permission.r ? "R" : "-", descriptor->block.permission.w ? "W" : "-", descriptor->block.permission.x ? "X" : "-");
+            state.logger->Debug("svcQueryMemory: Address: 0x{:X}, Size: 0x{:X}, Type: 0x{:X}, Is Uncached: {}, Permissions: {}{}{}", memInfo.address, memInfo.size, memInfo.type, bool(chunk->attributes.isUncached), chunk->permission.r ? 'R' : '-', chunk->permission.w ? 'W' : '-', chunk->permission.x ? 'X' : '-');
         } else {
-            auto addressSpaceEnd{state.os->memory.addressSpace.address + state.os->memory.addressSpace.size};
+            auto addressSpaceEnd{reinterpret_cast<u64>(state.os->memory.addressSpace.address + state.os->memory.addressSpace.size)};
 
             memInfo = {
                 .address = addressSpaceEnd,
@@ -204,7 +206,7 @@ namespace skyline::kernel::svc {
                 .type = static_cast<u32>(memory::MemoryType::Reserved),
             };
 
-            state.logger->Debug("svcQueryMemory: Trying to query memory outside of the application's address space: 0x{:X}", address);
+            state.logger->Debug("svcQueryMemory: Trying to query memory outside of the application's address space: 0x{:X}", fmt::ptr(ptr));
         }
 
         state.process->WriteMemory(memInfo, state.ctx->registers.x0);
@@ -310,11 +312,11 @@ namespace skyline::kernel::svc {
     void MapSharedMemory(DeviceState &state) {
         try {
             auto object{state.process->GetHandle<type::KSharedMemory>(state.ctx->registers.w0)};
-            auto address{state.ctx->registers.x1};
+            auto ptr{reinterpret_cast<u8 *>(state.ctx->registers.x1)};
 
-            if (!util::PageAligned(address)) {
+            if (!util::PageAligned(ptr)) {
                 state.ctx->registers.w0 = result::InvalidAddress;
-                state.logger->Warn("svcMapSharedMemory: 'address' not page aligned: 0x{:X}", address);
+                state.logger->Warn("svcMapSharedMemory: 'ptr' not page aligned: 0x{:X}", ptr);
                 return;
             }
 
@@ -327,14 +329,14 @@ namespace skyline::kernel::svc {
 
             auto permission{*reinterpret_cast<memory::Permission *>(&state.ctx->registers.w3)};
             if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
-                state.logger->Warn("svcMapSharedMemory: 'permission' invalid: {}{}{}", permission.r ? "R" : "-", permission.w ? "W" : "-", permission.x ? "X" : "-");
+                state.logger->Warn("svcMapSharedMemory: 'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
                 state.ctx->registers.w0 = result::InvalidNewMemoryPermission;
                 return;
             }
 
-            state.logger->Debug("svcMapSharedMemory: Mapping shared memory at 0x{:X} for {} bytes ({}{}{})", address, size, permission.r ? "R" : "-", permission.w ? "W" : "-", permission.x ? "X" : "-");
+            state.logger->Debug("svcMapSharedMemory: Mapping shared memory at 0x{:X} for {} bytes ({}{}{})", ptr, size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
 
-            object->Map(address, size, permission);
+            object->Map(ptr, size, permission);
 
             state.ctx->registers.w0 = Result{};
         } catch (const std::exception &) {
@@ -344,10 +346,10 @@ namespace skyline::kernel::svc {
     }
 
     void CreateTransferMemory(DeviceState &state) {
-        auto address{state.ctx->registers.x1};
-        if (!util::PageAligned(address)) {
+        auto ptr{reinterpret_cast<u8 *>(state.ctx->registers.x1)};
+        if (!util::PageAligned(ptr)) {
             state.ctx->registers.w0 = result::InvalidAddress;
-            state.logger->Warn("svcCreateTransferMemory: 'address' not page aligned: 0x{:X}", address);
+            state.logger->Warn("svcCreateTransferMemory: 'ptr' not page aligned: 0x{:X}", ptr);
             return;
         }
 
@@ -360,17 +362,16 @@ namespace skyline::kernel::svc {
 
         auto permission{*reinterpret_cast<memory::Permission *>(&state.ctx->registers.w3)};
         if ((permission.w && !permission.r) || (permission.x && !permission.r)) {
-            state.logger->Warn("svcCreateTransferMemory: 'permission' invalid: {}{}{}", permission.r ? "R" : "-", permission.w ? "W" : "-", permission.x ? "X" : "-");
+            state.logger->Warn("svcCreateTransferMemory: 'permission' invalid: {}{}{}", permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
             state.ctx->registers.w0 = result::InvalidNewMemoryPermission;
             return;
         }
 
-        state.logger->Debug("svcCreateTransferMemory: Creating transfer memory at 0x{:X} for {} bytes ({}{}{})", address, size, permission.r ? "R" : "-", permission.w ? "W" : "-", permission.x ? "X" : "-");
-
-        auto shmem{state.process->NewHandle<type::KTransferMemory>(state.process->pid, address, size, permission)};
+        auto tmem{state.process->NewHandle<type::KTransferMemory>(ptr, size, permission)};
+        state.logger->Debug("svcCreateTransferMemory: Creating transfer memory at 0x{:X} for {} bytes ({}{}{})", fmt::ptr(ptr), size, permission.r ? 'R' : '-', permission.w ? 'W' : '-', permission.x ? 'X' : '-');
 
         state.ctx->registers.w0 = Result{};
-        state.ctx->registers.w1 = shmem.handle;
+        state.ctx->registers.w1 = tmem.handle;
     }
 
     void CloseHandle(DeviceState &state) {
@@ -624,10 +625,10 @@ namespace skyline::kernel::svc {
     }
 
     void OutputDebugString(DeviceState &state) {
-        auto debug{state.process->GetString(state.ctx->registers.x0, state.ctx->registers.x1)};
+        auto debug{span(reinterpret_cast<u8*>(state.ctx->registers.x0), state.ctx->registers.x1).as_string()};
 
         if (debug.back() == '\n')
-            debug.pop_back();
+            debug.remove_suffix(1);
 
         state.logger->Info("Debug Output: {}", debug);
         state.ctx->registers.w0 = Result{};
@@ -695,7 +696,7 @@ namespace skyline::kernel::svc {
                 break;
 
             case constant::infoState::PersonalMmHeapUsage:
-                out = state.process->heap->address + constant::DefStackSize;
+                out = state.process->heap->size + constant::DefStackSize;
                 break;
 
            case constant::infoState::TotalMemoryAvailableWithoutMmHeap:
@@ -707,7 +708,7 @@ namespace skyline::kernel::svc {
                break;
 
            case constant::infoState::UserExceptionContextAddr:
-                out = state.process->tlsPages[0]->Get(0);
+                out = reinterpret_cast<u64>(state.process->tlsPages[0]->Get(0));
                break;
 
            default:
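The common thread across these SVC handlers is that a guest address arriving in an X register is now usable directly as a host pointer, so copies become plain memcpy/span operations instead of going through KProcess helpers. The recurring shape, reduced to its core (illustrative fragment reusing the names from the hunks above):

    auto ptr{reinterpret_cast<u8 *>(state.ctx->registers.x1)};
    if (!util::PageAligned(ptr)) {
        state.ctx->registers.w0 = result::InvalidAddress;
        return;
    }
    // 'ptr' can now be dereferenced, memcpy'd, or wrapped in span(ptr, size) directly,
    // because the guest shares the emulator's address space under NCE3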
@@ -15,34 +15,23 @@ namespace skyline::kernel::type {
         KMemory(const DeviceState &state, KType objectType) : KObject(state, objectType) {}
 
         /**
-         * @brief Remap a chunk of memory as to change the size occupied by it
-         * @param size The new size of the memory
-         * @return The address the memory was remapped to
+         * @return A span representing the memory object on the guest
         */
-        virtual void Resize(size_t size) = 0;
+        virtual span<u8> Get() = 0;
 
        /**
         * @brief Updates the permissions of a block of mapped memory
-         * @param address The starting address to change the permissions at
+         * @param ptr The starting address to change the permissions at
         * @param size The size of the partition to change the permissions of
         * @param permission The new permissions to be set for the memory
        */
-        virtual void UpdatePermission(u64 address, u64 size, memory::Permission permission) = 0;
+        virtual void UpdatePermission(u8* ptr, size_t size, memory::Permission permission) = 0;
 
-        /**
-         * @brief Updates the permissions of a chunk of mapped memory
-         * @param permission The new permissions to be set for the memory
-         */
-        inline virtual void UpdatePermission(memory::Permission permission) = 0;
-
-        /**
-         * @brief Checks if the specified address is within the memory object
-         * @param address The address to check
-         * @return If the address is inside the memory object
-         */
-        inline virtual bool IsInside(u64 address) = 0;
+        bool IsInside(u8* ptr) {
+            auto spn{Get()};
+            return (spn.data() <= ptr) && ((spn.data() + spn.size()) > ptr);
+        }
 
        virtual ~KMemory() = default;
-
    };
 }
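With the base class exposing its mapping as span<u8>, containment checks no longer need a per-subclass override: IsInside() is a concrete helper written against Get(), and a subclass only has to describe its mapping (the KPrivateMemory header further down does exactly that with `return span(ptr, size);`). An equivalent formulation of the check for some memory object `object` (illustrative fragment):

    auto spn{object->Get()};
    bool inside{spn.data() <= ptr && ptr < spn.data() + spn.size()};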
@ -10,153 +10,69 @@
|
|||||||
#include "KProcess.h"
|
#include "KProcess.h"
|
||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
KPrivateMemory::KPrivateMemory(const DeviceState &state, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState) : size(size), KMemory(state, KType::KPrivateMemory) {
|
KPrivateMemory::KPrivateMemory(const DeviceState &state, u8* ptr, size_t size, memory::Permission permission, memory::MemoryState memState) : size(size), permission(permission), memState(memState), KMemory(state, KType::KPrivateMemory) {
|
||||||
if (address && !util::PageAligned(address))
|
if (ptr && !util::PageAligned(ptr))
|
||||||
throw exception("KPrivateMemory was created with non-page-aligned address: 0x{:X}", address);
|
throw exception("KPrivateMemory was created with non-page-aligned address: 0x{:X}", fmt::ptr(ptr));
|
||||||
|
|
||||||
fd = ASharedMemory_create("KPrivateMemory", size);
|
ptr = reinterpret_cast<u8*>(mmap(ptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, ptr ? MAP_FIXED : 0, 0, 0));
|
||||||
if (fd < 0)
|
if (ptr == MAP_FAILED)
|
||||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
throw exception("An occurred while mapping private memory: {}", strerror(errno));
|
||||||
|
|
||||||
auto host{mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0)};
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
if (host == MAP_FAILED)
|
.ptr = ptr,
|
||||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
|
||||||
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(permission.Get()),
|
|
||||||
.x3 = static_cast<u64>(MAP_SHARED | ((address) ? MAP_FIXED : 0)),
|
|
||||||
.x4 = static_cast<u64>(fd),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while mapping private memory in child process");
|
|
||||||
|
|
||||||
this->address = fregs.x0;
|
|
||||||
|
|
||||||
BlockDescriptor block{
|
|
||||||
.address = fregs.x0,
|
|
||||||
.size = size,
|
.size = size,
|
||||||
.permission = permission,
|
.permission = permission,
|
||||||
};
|
|
||||||
ChunkDescriptor chunk{
|
|
||||||
.address = fregs.x0,
|
|
||||||
.size = size,
|
|
||||||
.host = reinterpret_cast<u64>(host),
|
|
||||||
.state = memState,
|
.state = memState,
|
||||||
.blockList = {block},
|
});
|
||||||
};
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
this->ptr = ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
void KPrivateMemory::Resize(size_t nSize) {
|
void KPrivateMemory::Resize(size_t nSize) {
|
||||||
if (close(fd) < 0)
|
ptr = reinterpret_cast<u8*>(mremap(ptr, size, nSize, 0));
|
||||||
throw exception("An error occurred while trying to close shared memory FD: {}", strerror(errno));
|
if (ptr == MAP_FAILED)
|
||||||
|
throw exception("An occurred while resizing private memory: {}", strerror(errno));
|
||||||
|
|
||||||
fd = ASharedMemory_create("KPrivateMemory", nSize);
|
if (nSize < size) {
|
||||||
if (fd < 0)
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
.ptr = ptr + nSize,
|
||||||
|
.size = size - nSize,
|
||||||
Registers fregs{
|
.state = memory::states::Unmapped,
|
||||||
.x0 = address,
|
});
|
||||||
.x1 = size,
|
} else if (size < nSize) {
|
||||||
.x8 = __NR_munmap
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
};
|
.ptr = ptr + size,
|
||||||
|
.size = nSize - size,
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
.permission = permission,
|
||||||
if (fregs.x0 < 0)
|
.state = memState,
|
||||||
throw exception("An error occurred while unmapping private memory in child process");
|
});
|
||||||
|
|
||||||
fregs = {
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = nSize,
|
|
||||||
.x2 = static_cast<u64>(PROT_READ | PROT_WRITE | PROT_EXEC),
|
|
||||||
.x3 = static_cast<u64>(MAP_SHARED | MAP_FIXED),
|
|
||||||
.x4 = static_cast<u64>(fd),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while remapping private memory in child process");
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
state.process->WriteMemory(reinterpret_cast<void *>(chunk->host), address, std::min(nSize, size), true);
|
|
||||||
|
|
||||||
for (const auto &block : chunk->blockList) {
|
|
||||||
if ((block.address - chunk->address) < size) {
|
|
||||||
fregs = {
|
|
||||||
.x0 = block.address,
|
|
||||||
.x1 = std::min(block.size, (chunk->address + nSize) - block.address),
|
|
||||||
.x2 = static_cast<u64>(block.permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while updating private memory's permissions in child process");
|
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
munmap(reinterpret_cast<void *>(chunk->host), size);
|
|
||||||
|
|
||||||
auto host{mmap(reinterpret_cast<void *>(chunk->host), nSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0)};
|
|
||||||
if (host == MAP_FAILED)
|
|
||||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
|
||||||
|
|
||||||
chunk->host = reinterpret_cast<u64>(host);
|
|
||||||
MemoryManager::ResizeChunk(chunk, nSize);
|
|
||||||
size = nSize;
|
size = nSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
void KPrivateMemory::UpdatePermission(u64 address, u64 size, memory::Permission permission) {
|
void KPrivateMemory::UpdatePermission(u8* ptr, size_t size, memory::Permission permission) {
|
||||||
Registers fregs{
|
if (ptr && !util::PageAligned(ptr))
|
||||||
.x0 = address,
|
throw exception("KPrivateMemory permission updated with a non-page-aligned address: 0x{:X}", fmt::ptr(ptr));
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while updating private memory's permissions in child process");
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
|
|
||||||
// If a static code region has been mapped as writable it needs to be changed to mutable
|
// If a static code region has been mapped as writable it needs to be changed to mutable
|
||||||
if (chunk->state.value == memory::states::CodeStatic.value && permission.w)
|
if (memState.value == memory::states::CodeStatic.value && permission.w)
|
||||||
chunk->state = memory::states::CodeMutable;
|
memState = memory::states::CodeMutable;
|
||||||
|
|
||||||
BlockDescriptor block{
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
.address = address,
|
.ptr = ptr,
|
||||||
.size = size,
|
.size = size,
|
||||||
.permission = permission,
|
.permission = permission,
|
||||||
};
|
.state = memState,
|
||||||
MemoryManager::InsertBlock(chunk, block);
|
});
|
||||||
}
|
}
|
||||||
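A rough sketch of the in-process flow above, with the chunk bookkeeping reduced to a plain struct (the names are illustrative, not the emulator's): apply mprotect to the host mapping, promote static code to mutable code once it becomes writable, then record the permission reported to the guest.

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <sys/mman.h>

enum class State { CodeStatic, CodeMutable, Heap, Unmapped };

struct Chunk {
    uint8_t *ptr;
    size_t size;
    int prot;
    State state;
};

void UpdateChunkPermission(Chunk &chunk, uint8_t *ptr, size_t size, int prot) {
    if (mprotect(ptr, size, prot) < 0) // Change the protection of the host mapping
        throw std::runtime_error("mprotect failed");

    // A static code region that has been made writable must be tracked as mutable code
    if (chunk.state == State::CodeStatic && (prot & PROT_WRITE))
        chunk.state = State::CodeMutable;

    chunk.prot = prot; // Record the permission reported to the guest
}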
|
|
||||||
KPrivateMemory::~KPrivateMemory() {
|
KPrivateMemory::~KPrivateMemory() {
|
||||||
try {
|
munmap(ptr, size);
|
||||||
if (state.process) {
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
Registers fregs{
|
.ptr = ptr,
|
||||||
.x0 = address,
|
.size = size,
|
||||||
.x1 = size,
|
.state = memory::states::Unmapped,
|
||||||
.x8 = __NR_munmap,
|
});
|
||||||
};
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
}
|
|
||||||
} catch (const std::exception &) {
|
|
||||||
}
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
if (chunk) {
|
|
||||||
munmap(reinterpret_cast<void *>(chunk->host), chunk->size);
|
|
||||||
state.os->memory.DeleteChunk(address);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
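The destructor above pairs the munmap with re-registering the range as Unmapped; together with the mapping done at construction (not shown in this hunk) that gives KPrivateMemory RAII semantics. A stripped-down sketch of that shape, with chunk tracking reduced to a callback (names are illustrative):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <sys/mman.h>

class PrivateMapping {
  public:
    uint8_t *ptr{};
    size_t size;

    PrivateMapping(uint8_t *hint, size_t size, std::function<void(uint8_t *, size_t, bool)> track) : size(size), track(std::move(track)) {
        // MAP_FIXED (as in the original) replaces whatever was previously mapped at 'hint'
        void *result{mmap(hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | (hint ? MAP_FIXED : 0), -1, 0)};
        if (result == MAP_FAILED)
            throw std::runtime_error("Could not map private memory");
        ptr = static_cast<uint8_t *>(result);
        this->track(ptr, size, true); // Register the range as a mapped chunk
    }

    ~PrivateMapping() {
        munmap(ptr, size);
        track(ptr, size, false); // Mark the range as unmapped again
    }

  private:
    std::function<void(uint8_t *, size_t, bool)> track;
};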
|
@ -10,27 +10,23 @@ namespace skyline::kernel::type {
|
|||||||
* @brief KPrivateMemory is used to map memory local to the guest process
|
* @brief KPrivateMemory is used to map memory local to the guest process
|
||||||
*/
|
*/
|
||||||
class KPrivateMemory : public KMemory {
|
class KPrivateMemory : public KMemory {
|
||||||
private:
|
|
||||||
int fd; //!< A file descriptor to the underlying shared memory
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
u64 address{}; //!< The address of the allocated memory
|
u8* ptr{}; //!< The address of the allocated memory
|
||||||
size_t size{}; //!< The size of the allocated memory
|
size_t size{}; //!< The size of the allocated memory
|
||||||
|
memory::Permission permission; //!< The permissions for the allocated memory
|
||||||
|
memory::MemoryState memState; //!< The MemoryState of the chunk of memory
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @param address The address to map to (If NULL then an arbitrary address is picked)
|
* @param ptr The address to map to (If NULL then an arbitrary address is picked)
|
||||||
* @param size The size of the allocation
|
* @param permission The permissions for the allocated memory (as reported to the application; host memory permissions aren't reflected by this)
|
||||||
* @param permission The permissions for the allocated memory
|
|
||||||
* @param memState The MemoryState of the chunk of memory
|
|
||||||
*/
|
*/
|
||||||
KPrivateMemory(const DeviceState &state, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState);
|
KPrivateMemory(const DeviceState &state, u8* ptr, size_t size, memory::Permission permission, memory::MemoryState memState);
|
||||||
|
|
||||||
/**
|
void Resize(size_t size);
|
||||||
* @brief Remap a chunk of memory as to change the size occupied by it
|
|
||||||
* @param size The new size of the memory
|
inline span<u8> Get() override {
|
||||||
* @return The address the memory was remapped to
|
return span(ptr, size);
|
||||||
*/
|
}
|
||||||
virtual void Resize(size_t size);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Updates the permissions of a block of mapped memory
|
* @brief Updates the permissions of a block of mapped memory
|
||||||
@ -38,24 +34,7 @@ namespace skyline::kernel::type {
|
|||||||
* @param size The size of the partition to change the permissions of
|
* @param size The size of the partition to change the permissions of
|
||||||
* @param permission The new permissions to be set for the memory
|
* @param permission The new permissions to be set for the memory
|
||||||
*/
|
*/
|
||||||
virtual void UpdatePermission(u64 address, u64 size, memory::Permission permission);
|
void UpdatePermission(u8* ptr, size_t size, memory::Permission permission) override;
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates the permissions of a chunk of mapped memory
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
*/
|
|
||||||
inline virtual void UpdatePermission(memory::Permission permission) {
|
|
||||||
UpdatePermission(address, size, permission);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Checks if the specified address is within the memory object
|
|
||||||
* @param address The address to check
|
|
||||||
* @return If the address is inside the memory object
|
|
||||||
*/
|
|
||||||
inline virtual bool IsInside(u64 address) {
|
|
||||||
return (this->address <= address) && ((this->address + this->size) > address);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief The destructor of private memory; it deallocates the memory
|
* @brief The destructor of private memory; it deallocates the memory
|
||||||
|
@ -11,9 +11,9 @@
|
|||||||
#include "KProcess.h"
|
#include "KProcess.h"
|
||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
KProcess::TlsPage::TlsPage(u64 address) : address(address) {}
|
KProcess::TlsPage::TlsPage(u8* ptr) : ptr(ptr) {}
|
||||||
|
|
||||||
u64 KProcess::TlsPage::ReserveSlot() {
|
u8* KProcess::TlsPage::ReserveSlot() {
|
||||||
if (Full())
|
if (Full())
|
||||||
throw exception("Trying to get TLS slot from full page");
|
throw exception("Trying to get TLS slot from full page");
|
||||||
|
|
||||||
@ -21,32 +21,32 @@ namespace skyline::kernel::type {
|
|||||||
return Get(index++); // Post-increment: the current slot is handed out, then the index advances
|
return Get(index++); // Post-increment: the current slot is handed out, then the index advances
|
||||||
}
|
}
|
||||||
|
|
||||||
u64 KProcess::TlsPage::Get(u8 slotNo) {
|
u8* KProcess::TlsPage::Get(u8 slotNo) {
|
||||||
if (slotNo >= constant::TlsSlots)
|
if (slotNo >= constant::TlsSlots)
|
||||||
throw exception("TLS slot is out of range");
|
throw exception("TLS slot is out of range");
|
||||||
|
|
||||||
return address + (constant::TlsSlotSize * slotNo);
|
return ptr + (constant::TlsSlotSize * slotNo);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool KProcess::TlsPage::Full() {
|
bool KProcess::TlsPage::Full() {
|
||||||
return slot[constant::TlsSlots - 1];
|
return slot[constant::TlsSlots - 1];
|
||||||
}
|
}
|
||||||
|
|
||||||
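A compact sketch of the slot bookkeeping above; the constants mirror constant::TlsSlots and constant::TlsSlotSize but are assumptions here (0x200-byte slots, eight per 4 KiB page). Slots are handed out sequentially and a page is full once its last slot is taken.

#include <array>
#include <cstddef>
#include <cstdint>
#include <stdexcept>

constexpr size_t TlsSlotSize{0x200};
constexpr size_t TlsSlots{0x1000 / TlsSlotSize}; // 8 slots per 4 KiB page

struct TlsPage {
    uint8_t *ptr;                      // Base of the page backing the slots
    uint8_t index{};                   // Next slot to hand out
    std::array<bool, TlsSlots> slot{}; // Which slots have been reserved

    bool Full() const {
        return slot[TlsSlots - 1];
    }

    uint8_t *ReserveSlot() {
        if (Full())
            throw std::runtime_error("TLS page is full");
        slot[index] = true;
        return ptr + TlsSlotSize * index++; // Hand out the current slot, then advance
    }
};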
u64 KProcess::GetTlsSlot() {
|
u8* KProcess::GetTlsSlot() {
|
||||||
for (auto &tlsPage: tlsPages)
|
for (auto &tlsPage: tlsPages)
|
||||||
if (!tlsPage->Full())
|
if (!tlsPage->Full())
|
||||||
return tlsPage->ReserveSlot();
|
return tlsPage->ReserveSlot();
|
||||||
|
|
||||||
u64 address;
|
u8* ptr;
|
||||||
if (tlsPages.empty()) {
|
if (tlsPages.empty()) {
|
||||||
auto region{state.os->memory.tlsIo};
|
auto region{state.os->memory.tlsIo};
|
||||||
address = region.size ? region.address : 0;
|
ptr = reinterpret_cast<u8*>(region.size ? region.address : 0);
|
||||||
} else {
|
} else {
|
||||||
address = (*(tlsPages.end() - 1))->address + PAGE_SIZE;
|
ptr = (*(tlsPages.end() - 1))->ptr + PAGE_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto tlsMem{NewHandle<KPrivateMemory>(address, PAGE_SIZE, memory::Permission(true, true, false), memory::states::ThreadLocal).item};
|
auto tlsMem{NewHandle<KPrivateMemory>(ptr, PAGE_SIZE, memory::Permission(true, true, false), memory::states::ThreadLocal).item};
|
||||||
tlsPages.push_back(std::make_shared<TlsPage>(tlsMem->address));
|
tlsPages.push_back(std::make_shared<TlsPage>(tlsMem->ptr));
|
||||||
|
|
||||||
auto &tlsPage{tlsPages.back()};
|
auto &tlsPage{tlsPages.back()};
|
||||||
if (tlsPages.empty())
|
if (tlsPages.empty())
|
||||||
@ -57,14 +57,14 @@ namespace skyline::kernel::type {
|
|||||||
|
|
||||||
void KProcess::InitializeMemory() {
|
void KProcess::InitializeMemory() {
|
||||||
constexpr size_t DefHeapSize{0x200000}; // The default amount of heap
|
constexpr size_t DefHeapSize{0x200000}; // The default amount of heap
|
||||||
heap = NewHandle<KPrivateMemory>(state.os->memory.heap.address, DefHeapSize, memory::Permission{true, true, false}, memory::states::Heap).item;
|
heap = NewHandle<KPrivateMemory>(reinterpret_cast<u8*>(state.os->memory.heap.address), DefHeapSize, memory::Permission{true, true, false}, memory::states::Heap).item;
|
||||||
threads[pid]->tls = GetTlsSlot();
|
threads[pid]->tls = GetTlsSlot();
|
||||||
}
|
}
|
||||||
|
|
||||||
KProcess::KProcess(const DeviceState &state, pid_t pid, u64 entryPoint, std::shared_ptr<type::KSharedMemory> &stack, std::shared_ptr<type::KSharedMemory> &tlsMemory) : pid(pid), stack(stack), KSyncObject(state, KType::KProcess) {
|
KProcess::KProcess(const DeviceState &state, pid_t pid, u64 entryPoint, std::shared_ptr<type::KSharedMemory> &stack, std::shared_ptr<type::KSharedMemory> &tlsMemory) : pid(pid), stack(stack), KSyncObject(state, KType::KProcess) {
|
||||||
constexpr u8 DefaultPriority{44}; // The default priority of a process
|
constexpr u8 DefaultPriority{44}; // The default priority of a process
|
||||||
|
|
||||||
auto thread{NewHandle<KThread>(pid, entryPoint, 0x0, stack->guest.address + stack->guest.size, 0, DefaultPriority, this, tlsMemory).item};
|
auto thread{NewHandle<KThread>(pid, entryPoint, 0, reinterpret_cast<u64>(stack->guest.ptr + stack->guest.size), nullptr, DefaultPriority, this, tlsMemory).item};
|
||||||
threads[pid] = thread;
|
threads[pid] = thread;
|
||||||
state.nce->WaitThreadInit(thread);
|
state.nce->WaitThreadInit(thread);
|
||||||
|
|
||||||
@ -80,12 +80,12 @@ namespace skyline::kernel::type {
|
|||||||
|
|
||||||
std::shared_ptr<KThread> KProcess::CreateThread(u64 entryPoint, u64 entryArg, u64 stackTop, i8 priority) {
|
std::shared_ptr<KThread> KProcess::CreateThread(u64 entryPoint, u64 entryArg, u64 stackTop, i8 priority) {
|
||||||
auto size{(sizeof(ThreadContext) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)};
|
auto size{(sizeof(ThreadContext) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)};
|
||||||
auto tlsMem{std::make_shared<type::KSharedMemory>(state, 0, size, memory::Permission{true, true, false}, memory::states::Reserved)};
|
auto tlsMem{std::make_shared<type::KSharedMemory>(state, size, memory::states::Reserved)};
|
||||||
|
|
||||||
Registers fregs{
|
Registers fregs{
|
||||||
.x0 = CLONE_THREAD | CLONE_SIGHAND | CLONE_PTRACE | CLONE_FS | CLONE_VM | CLONE_FILES | CLONE_IO,
|
.x0 = CLONE_THREAD | CLONE_SIGHAND | CLONE_PTRACE | CLONE_FS | CLONE_VM | CLONE_FILES | CLONE_IO,
|
||||||
.x1 = stackTop,
|
.x1 = stackTop,
|
||||||
.x3 = tlsMem->Map(0, size, memory::Permission{true, true, false}),
|
.x3 = reinterpret_cast<u64>(tlsMem->Map(nullptr, size, memory::Permission{true, true, false})),
|
||||||
.x8 = __NR_clone,
|
.x8 = __NR_clone,
|
||||||
.x5 = reinterpret_cast<u64>(&guest::GuestEntry),
|
.x5 = reinterpret_cast<u64>(&guest::GuestEntry),
|
||||||
.x6 = entryPoint,
|
.x6 = entryPoint,
|
||||||
@ -96,20 +96,15 @@ namespace skyline::kernel::type {
|
|||||||
throw exception("Cannot create thread: Address: 0x{:X}, Stack Top: 0x{:X}", entryPoint, stackTop);
|
throw exception("Cannot create thread: Address: 0x{:X}, Stack Top: 0x{:X}", entryPoint, stackTop);
|
||||||
|
|
||||||
auto pid{static_cast<pid_t>(fregs.x0)};
|
auto pid{static_cast<pid_t>(fregs.x0)};
|
||||||
auto process{NewHandle<KThread>(pid, entryPoint, entryArg, stackTop, GetTlsSlot(), priority, this, tlsMem).item};
|
auto thread{NewHandle<KThread>(pid, entryPoint, entryArg, stackTop, GetTlsSlot(), priority, this, tlsMem).item};
|
||||||
threads[pid] = process;
|
threads[pid] = thread;
|
||||||
|
|
||||||
return process;
|
return thread;
|
||||||
}
|
|
||||||
|
|
||||||
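For reference, a minimal sketch of what the __NR_clone call above amounts to when issued through glibc's clone(3) wrapper: the flag set keeps the new task in the same address space, file table and signal handlers, so it behaves like a thread of the calling process. The entry function, stack and synchronization here are illustrative only (CLONE_PTRACE omitted for brevity).

#include <sched.h>   // clone(3) and the CLONE_* flags; _GNU_SOURCE is needed on glibc
#include <unistd.h>
#include <cstddef>
#include <cstdlib>

static int GuestEntry(void *) {
    const char msg[]{"child running in the shared address space\n"};
    write(STDOUT_FILENO, msg, sizeof(msg) - 1); // Avoid libc machinery that assumes pthread-style setup
    return 0; // glibc's clone wrapper exits only this task
}

int main() {
    constexpr size_t StackSize{0x10000};
    auto *stack{static_cast<char *>(std::malloc(StackSize))};

    // Share the address space, file table and signal handlers, as the emulator does above
    int flags{CLONE_THREAD | CLONE_SIGHAND | CLONE_FS | CLONE_VM | CLONE_FILES | CLONE_IO};
    int tid{clone(GuestEntry, stack + StackSize, flags, nullptr)}; // Stacks grow downwards, so pass the top
    if (tid < 0)
        return 1;

    sleep(1); // Crude synchronization: give the child a chance to run before the process exits
    return 0;
}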
u64 KProcess::GetHostAddress(u64 address) {
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
return (chunk && chunk->host) ? chunk->host + (address - chunk->address) : 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void KProcess::ReadMemory(void *destination, u64 offset, size_t size, bool forceGuest) {
|
void KProcess::ReadMemory(void *destination, u64 offset, size_t size, bool forceGuest) {
|
||||||
if (!forceGuest) {
|
if (!forceGuest) {
|
||||||
auto source{GetHostAddress(offset)};
|
auto source{reinterpret_cast<u8*>(offset)};
|
||||||
|
|
||||||
if (source) {
|
if (source) {
|
||||||
std::memcpy(destination, reinterpret_cast<void *>(source), size);
|
std::memcpy(destination, reinterpret_cast<void *>(source), size);
|
||||||
@ -133,7 +128,7 @@ namespace skyline::kernel::type {
|
|||||||
|
|
||||||
void KProcess::WriteMemory(const void *source, u64 offset, size_t size, bool forceGuest) {
|
void KProcess::WriteMemory(const void *source, u64 offset, size_t size, bool forceGuest) {
|
||||||
if (!forceGuest) {
|
if (!forceGuest) {
|
||||||
auto destination{GetHostAddress(offset)};
|
auto destination{reinterpret_cast<u8*>(offset)};
|
||||||
|
|
||||||
if (destination) {
|
if (destination) {
|
||||||
std::memcpy(reinterpret_cast<void *>(destination), source, size);
|
std::memcpy(reinterpret_cast<void *>(destination), source, size);
|
||||||
@ -155,31 +150,7 @@ namespace skyline::kernel::type {
|
|||||||
pwrite64(memFd, source, size, offset);
|
pwrite64(memFd, source, size, offset);
|
||||||
}
|
}
|
||||||
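A condensed sketch of the read/write pattern in these two functions: when the guest address is directly accessible in this process, a plain memcpy suffices; otherwise the target process's memory file is used. ReadGuestMemory is a hypothetical helper and memFd is assumed to be an open /proc/<pid>/mem descriptor, as in the fallback paths above.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unistd.h>

void ReadGuestMemory(int memFd, bool inProcess, void *destination, uint64_t offset, size_t size) {
    if (inProcess)
        std::memcpy(destination, reinterpret_cast<void *>(offset), size); // Guest memory is mapped in this process
    else
        pread64(memFd, destination, size, offset); // Read through the target process's memory file instead
}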
|
|
||||||
void KProcess::CopyMemory(u64 source, u64 destination, size_t size) {
|
std::optional<KProcess::HandleOut<KMemory>> KProcess::GetMemoryObject(u8* ptr) {
|
||||||
auto sourceHost{GetHostAddress(source)};
|
|
||||||
auto destinationHost{GetHostAddress(destination)};
|
|
||||||
|
|
||||||
if (sourceHost && destinationHost) {
|
|
||||||
std::memcpy(reinterpret_cast<void *>(destinationHost), reinterpret_cast<const void *>(sourceHost), size);
|
|
||||||
} else {
|
|
||||||
if (size <= PAGE_SIZE) {
|
|
||||||
std::vector<u8> buffer(size);
|
|
||||||
|
|
||||||
ReadMemory(buffer.data(), source, size);
|
|
||||||
WriteMemory(buffer.data(), destination, size);
|
|
||||||
} else {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = source,
|
|
||||||
.x1 = destination,
|
|
||||||
.x2 = size,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Memcopy, fregs);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::optional<KProcess::HandleOut<KMemory>> KProcess::GetMemoryObject(u64 address) {
|
|
||||||
for (KHandle index{}; index < handles.size(); index++) {
|
for (KHandle index{}; index < handles.size(); index++) {
|
||||||
auto& object{handles[index]};
|
auto& object{handles[index]};
|
||||||
switch (object->objectType) {
|
switch (object->objectType) {
|
||||||
@ -187,7 +158,7 @@ namespace skyline::kernel::type {
|
|||||||
case type::KType::KSharedMemory:
|
case type::KType::KSharedMemory:
|
||||||
case type::KType::KTransferMemory: {
|
case type::KType::KTransferMemory: {
|
||||||
auto mem{std::static_pointer_cast<type::KMemory>(object)};
|
auto mem{std::static_pointer_cast<type::KMemory>(object)};
|
||||||
if (mem->IsInside(address))
|
if (mem->IsInside(ptr))
|
||||||
return std::make_optional<KProcess::HandleOut<KMemory>>({mem, constant::BaseHandleIndex + index});
|
return std::make_optional<KProcess::HandleOut<KMemory>>({mem, constant::BaseHandleIndex + index});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -33,27 +33,24 @@ namespace skyline {
|
|||||||
* @url https://switchbrew.org/wiki/Thread_Local_Storage
|
* @url https://switchbrew.org/wiki/Thread_Local_Storage
|
||||||
*/
|
*/
|
||||||
struct TlsPage {
|
struct TlsPage {
|
||||||
u64 address; //!< The address of the page allocated for TLS
|
u8* ptr; //!< The address of the page allocated for TLS
|
||||||
u8 index{}; //!< The slots are assigned sequentially, this holds the index of the last TLS slot reserved
|
u8 index{}; //!< The slots are assigned sequentially, this holds the index of the last TLS slot reserved
|
||||||
bool slot[constant::TlsSlots]{}; //!< An array of booleans denoting which TLS slots are reserved
|
bool slot[constant::TlsSlots]{}; //!< An array of booleans denoting which TLS slots are reserved
|
||||||
|
|
||||||
/**
|
TlsPage(u8* ptr);
|
||||||
* @param address The address of the allocated page
|
|
||||||
*/
|
|
||||||
TlsPage(u64 address);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Reserves a single 0x200 byte TLS slot
|
* @brief Reserves a single 0x200 byte TLS slot
|
||||||
* @return The address of the reserved slot
|
* @return The address of the reserved slot
|
||||||
*/
|
*/
|
||||||
u64 ReserveSlot();
|
u8* ReserveSlot();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Returns the address of a particular slot
|
* @brief Returns the address of a particular slot
|
||||||
* @param slotNo The number of the slot to be returned
|
* @param slotNo The number of the slot to be returned
|
||||||
* @return The address of the specified slot
|
* @return The address of the specified slot
|
||||||
*/
|
*/
|
||||||
u64 Get(u8 slotNo);
|
u8* Get(u8 slotNo);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Returns boolean on if the TLS page has free slots or not
|
* @brief Returns boolean on if the TLS page has free slots or not
|
||||||
@ -65,7 +62,7 @@ namespace skyline {
|
|||||||
/**
|
/**
|
||||||
* @return The address of a free TLS slot
|
* @return The address of a free TLS slot
|
||||||
*/
|
*/
|
||||||
u64 GetTlsSlot();
|
u8* GetTlsSlot();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Initializes heap and the initial TLS page
|
* @brief Initializes heap and the initial TLS page
|
||||||
@ -118,10 +115,8 @@ namespace skyline {
|
|||||||
Mutex conditionalLock; //!< Synchronizes all concurrent guest conditional variable operations
|
Mutex conditionalLock; //!< Synchronizes all concurrent guest conditional variable operations
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Creates a KThread object for the main thread and opens the process's memory file
|
|
||||||
* @param pid The PID of the main thread
|
* @param pid The PID of the main thread
|
||||||
* @param entryPoint The address to start execution at
|
* @param entryPoint The entry point of execution for the guest
|
||||||
* @param stack The KSharedMemory object for Stack memory allocated by the guest process
|
|
||||||
* @param tlsMemory The KSharedMemory object for TLS memory allocated by the guest process
|
* @param tlsMemory The KSharedMemory object for TLS memory allocated by the guest process
|
||||||
*/
|
*/
|
||||||
KProcess(const DeviceState &state, pid_t pid, u64 entryPoint, std::shared_ptr<type::KSharedMemory> &stack, std::shared_ptr<type::KSharedMemory> &tlsMemory);
|
KProcess(const DeviceState &state, pid_t pid, u64 entryPoint, std::shared_ptr<type::KSharedMemory> &stack, std::shared_ptr<type::KSharedMemory> &tlsMemory);
|
||||||
@ -141,13 +136,6 @@ namespace skyline {
|
|||||||
*/
|
*/
|
||||||
std::shared_ptr<KThread> CreateThread(u64 entryPoint, u64 entryArg, u64 stackTop, i8 priority);
|
std::shared_ptr<KThread> CreateThread(u64 entryPoint, u64 entryArg, u64 stackTop, i8 priority);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Returns the host address for a specific address in guest memory
|
|
||||||
* @param address The corresponding guest address
|
|
||||||
* @return The corresponding host address
|
|
||||||
*/
|
|
||||||
u64 GetHostAddress(u64 address);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @tparam Type The type of the pointer to return
|
* @tparam Type The type of the pointer to return
|
||||||
* @param address The address on the guest
|
* @param address The address on the guest
|
||||||
@ -156,52 +144,7 @@ namespace skyline {
|
|||||||
*/
|
*/
|
||||||
template<typename Type>
|
template<typename Type>
|
||||||
inline Type *GetPointer(u64 address) {
|
inline Type *GetPointer(u64 address) {
|
||||||
return reinterpret_cast<Type *>(GetHostAddress(address));
|
return reinterpret_cast<Type *>(address);
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Returns a reference to an object from guest memory
|
|
||||||
*/
|
|
||||||
template<typename Type>
|
|
||||||
inline Type &GetReference(u64 address) {
|
|
||||||
auto source{GetPointer<Type>(address)};
|
|
||||||
if (source)
|
|
||||||
return *source;
|
|
||||||
else
|
|
||||||
throw exception("Cannot retrieve reference to object not in shared guest memory");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Returns a copy of an object from guest memory
|
|
||||||
* @tparam Type The type of the object to be read
|
|
||||||
* @param address The address of the object
|
|
||||||
* @return A copy of the object from guest memory
|
|
||||||
*/
|
|
||||||
template<typename Type>
|
|
||||||
inline Type GetObject(u64 address) {
|
|
||||||
auto source{GetPointer<Type>(address)};
|
|
||||||
if (source) {
|
|
||||||
return *source;
|
|
||||||
} else {
|
|
||||||
Type item{};
|
|
||||||
ReadMemory(&item, address, sizeof(Type));
|
|
||||||
return item;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Returns a string from guest memory
|
|
||||||
* @param address The address of the object
|
|
||||||
* @param maxSize The maximum size of the string
|
|
||||||
* @return A copy of a string in guest memory
|
|
||||||
*/
|
|
||||||
inline std::string GetString(u64 address, size_t maxSize) {
|
|
||||||
auto source{GetPointer<char>(address)};
|
|
||||||
if (source)
|
|
||||||
return std::string(source, maxSize);
|
|
||||||
std::string debug(maxSize, '\0');
|
|
||||||
ReadMemory(debug.data(), address, maxSize);
|
|
||||||
return debug;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -254,14 +197,6 @@ namespace skyline {
|
|||||||
*/
|
*/
|
||||||
void WriteMemory(const void *source, u64 offset, size_t size, bool forceGuest = false);
|
void WriteMemory(const void *source, u64 offset, size_t size, bool forceGuest = false);
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Copy one chunk to another in the guest's memory
|
|
||||||
* @param source The address of where the data to read is present
|
|
||||||
* @param destination The address to write the read data to
|
|
||||||
* @param size The amount of memory to be copied
|
|
||||||
*/
|
|
||||||
void CopyMemory(u64 source, u64 destination, size_t size);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Creates a new handle to a KObject and adds it to the process handle_table
|
* @brief Creates a new handle to a KObject and adds it to the process handle_table
|
||||||
* @tparam objectClass The class of the kernel object to create
|
* @tparam objectClass The class of the kernel object to create
|
||||||
@ -333,7 +268,7 @@ namespace skyline {
|
|||||||
* @param address The address to look for
|
* @param address The address to look for
|
||||||
* @return A shared pointer to the corresponding KMemory object
|
* @return A shared pointer to the corresponding KMemory object
|
||||||
*/
|
*/
|
||||||
std::optional<HandleOut<KMemory>> GetMemoryObject(u64 address);
|
std::optional<HandleOut<KMemory>> GetMemoryObject(u8* ptr);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Closes a handle in the handle table
|
* @brief Closes a handle in the handle table
|
||||||
|
@ -10,207 +10,69 @@
|
|||||||
#include "KProcess.h"
|
#include "KProcess.h"
|
||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
KSharedMemory::KSharedMemory(const DeviceState &state, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState, int mmapFlags, bool shared) : initialState(memState), KMemory(state, KType::KSharedMemory) {
|
KSharedMemory::KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState, KType type) : initialState(memState), KMemory(state, type) {
|
||||||
if (address && !util::PageAligned(address))
|
|
||||||
throw exception("KSharedMemory was created with non-page-aligned address: 0x{:X}", address);
|
|
||||||
|
|
||||||
fd = ASharedMemory_create("KSharedMemory", size);
|
fd = ASharedMemory_create("KSharedMemory", size);
|
||||||
if (fd < 0)
|
if (fd < 0)
|
||||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
throw exception("An error occurred while creating shared memory: {}", fd);
|
||||||
|
|
||||||
address = reinterpret_cast<u64>(mmap(reinterpret_cast<void *>(address), size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | ((address) ? MAP_FIXED : 0) | mmapFlags, fd, 0));
|
kernel.ptr = reinterpret_cast<u8*>(mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0));
|
||||||
if (address == reinterpret_cast<u64>(MAP_FAILED))
|
if (kernel.ptr == MAP_FAILED)
|
||||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
||||||
|
|
||||||
kernel = {.address = address, .size = size, .permission = permission};
|
|
||||||
|
|
||||||
if (shared) {
|
|
||||||
guest = kernel;
|
|
||||||
|
|
||||||
BlockDescriptor block{
|
|
||||||
.address = address,
|
|
||||||
.size = size,
|
|
||||||
.permission = permission,
|
|
||||||
};
|
|
||||||
|
|
||||||
ChunkDescriptor chunk{
|
|
||||||
.address = address,
|
|
||||||
.host = address,
|
|
||||||
.size = size,
|
|
||||||
.state = initialState,
|
|
||||||
.blockList = {block},
|
|
||||||
};
|
|
||||||
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 KSharedMemory::Map(const u64 address, const u64 size, memory::Permission permission) {
|
|
||||||
if (address && !util::PageAligned(address))
|
|
||||||
throw exception("KSharedMemory was mapped to a non-page-aligned address: 0x{:X}", address);
|
|
||||||
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(permission.Get()),
|
|
||||||
.x3 = static_cast<u64>(MAP_SHARED | ((address) ? MAP_FIXED : 0)),
|
|
||||||
.x4 = static_cast<u64>(fd),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while mapping shared memory in guest");
|
|
||||||
|
|
||||||
guest = {.address = fregs.x0, .size = size, .permission = permission};
|
|
||||||
|
|
||||||
BlockDescriptor block{
|
|
||||||
.address = fregs.x0,
|
|
||||||
.size = size,
|
|
||||||
.permission = permission,
|
|
||||||
};
|
|
||||||
ChunkDescriptor chunk{
|
|
||||||
.address = fregs.x0,
|
|
||||||
.host = kernel.address,
|
|
||||||
.size = size,
|
|
||||||
.state = initialState,
|
|
||||||
.blockList = {block},
|
|
||||||
};
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
|
||||||
|
|
||||||
return fregs.x0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSharedMemory::Resize(size_t size) {
|
|
||||||
if (guest.Valid() && kernel.Valid()) {
|
|
||||||
if (close(fd) < 0)
|
|
||||||
throw exception("An error occurred while trying to close shared memory FD: {}", strerror(errno));
|
|
||||||
|
|
||||||
fd = ASharedMemory_create("KSharedMemory", size);
|
|
||||||
if (fd < 0)
|
|
||||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
|
||||||
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = guest.address,
|
|
||||||
.x1 = guest.size,
|
|
||||||
.x8 = __NR_munmap
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while unmapping private memory in child process");
|
|
||||||
|
|
||||||
fregs = {
|
|
||||||
.x0 = guest.address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(PROT_READ | PROT_WRITE | PROT_EXEC),
|
|
||||||
.x3 = static_cast<u64>(MAP_SHARED | MAP_FIXED),
|
|
||||||
.x4 = static_cast<u64>(fd),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while remapping private memory in child process");
|
|
||||||
|
|
||||||
state.process->WriteMemory(reinterpret_cast<void *>(kernel.address), guest.address, std::min(guest.size, size), true);
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(guest.address)};
|
|
||||||
for (const auto &block : chunk->blockList) {
|
|
||||||
if ((block.address - chunk->address) < guest.size) {
|
|
||||||
fregs = {
|
|
||||||
.x0 = block.address,
|
|
||||||
.x1 = std::min(block.size, (chunk->address + size) - block.address),
|
|
||||||
.x2 = static_cast<u64>(block.permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while updating private memory's permissions in child process");
|
|
||||||
} else {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
munmap(reinterpret_cast<void *>(kernel.address), kernel.size);
|
|
||||||
|
|
||||||
auto host{mmap(reinterpret_cast<void *>(chunk->host), size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED, fd, 0)};
|
|
||||||
if (host == MAP_FAILED)
|
|
||||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
|
||||||
|
|
||||||
guest.size = size;
|
|
||||||
MemoryManager::ResizeChunk(chunk, size);
|
|
||||||
} else if (kernel.Valid()) {
|
|
||||||
if (close(fd) < 0)
|
|
||||||
throw exception("An error occurred while trying to close shared memory FD: {}", strerror(errno));
|
|
||||||
|
|
||||||
fd = ASharedMemory_create("KSharedMemory", size);
|
|
||||||
if (fd < 0)
|
|
||||||
throw exception("An error occurred while creating shared memory: {}", fd);
|
|
||||||
|
|
||||||
std::vector<u8> data(std::min(size, kernel.size));
|
|
||||||
std::memcpy(data.data(), reinterpret_cast<const void *>(kernel.address), std::min(size, kernel.size));
|
|
||||||
|
|
||||||
munmap(reinterpret_cast<void *>(kernel.address), kernel.size);
|
|
||||||
|
|
||||||
auto address{mmap(reinterpret_cast<void *>(kernel.address), size, kernel.permission.Get(), MAP_SHARED, fd, 0)};
|
|
||||||
if (address == MAP_FAILED)
|
|
||||||
throw exception("An occurred while mapping shared memory: {}", strerror(errno));
|
|
||||||
|
|
||||||
std::memcpy(address, data.data(), std::min(size, kernel.size));
|
|
||||||
|
|
||||||
kernel.address = reinterpret_cast<u64>(address);
|
|
||||||
kernel.size = size;
|
kernel.size = size;
|
||||||
} else {
|
|
||||||
throw exception("Cannot resize KSharedMemory that's only on guest");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void KSharedMemory::UpdatePermission(u64 address, u64 size, memory::Permission permission, bool host) {
|
u8 *KSharedMemory::Map(u8 *ptr, u64 size, memory::Permission permission) {
|
||||||
if (guest.Valid() && !host) {
|
if (ptr && !util::PageAligned(ptr))
|
||||||
Registers fregs{
|
throw exception("KSharedMemory was mapped to a non-page-aligned address: 0x{:X}", fmt::ptr(ptr));
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
guest.ptr = reinterpret_cast<u8*>(mmap(ptr, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | (ptr ? MAP_FIXED : 0), fd, 0));
|
||||||
if (fregs.x0 < 0)
|
if (guest.ptr == MAP_FAILED)
|
||||||
|
throw exception("An error occurred while mapping shared memory in guest");
|
||||||
|
guest.size = size;
|
||||||
|
|
||||||
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
|
.ptr = guest.ptr,
|
||||||
|
.size = size,
|
||||||
|
.permission = permission,
|
||||||
|
.state = initialState,
|
||||||
|
});
|
||||||
|
|
||||||
|
return guest.ptr;
|
||||||
|
}
|
||||||
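A self-contained sketch of the core idea behind the kernel/guest split above: mapping the same file descriptor twice yields two views of one backing region, so writes through the kernel view are visible through the guest view, and the data outlives the guest mapping. The names and sizes are illustrative.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>
#include <android/sharedmem.h>

int main() {
    constexpr size_t Size{0x1000};
    int fd{ASharedMemory_create("KSharedMemory", Size)};
    if (fd < 0)
        return 1;

    auto *kernel{static_cast<uint8_t *>(mmap(nullptr, Size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
    auto *guest{static_cast<uint8_t *>(mmap(nullptr, Size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
    if (kernel == MAP_FAILED || guest == MAP_FAILED)
        return 1;

    kernel[0] = 0x42;          // Write through the kernel view...
    assert(guest[0] == 0x42);  // ...and the guest view observes it immediately

    munmap(guest, Size);       // The guest can drop its view...
    assert(kernel[0] == 0x42); // ...while the kernel view (and the data) persists

    munmap(kernel, Size);
    close(fd);
    return 0;
}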
|
|
||||||
|
void KSharedMemory::UpdatePermission(u8* ptr, size_t size, memory::Permission permission) {
|
||||||
|
if (ptr && !util::PageAligned(ptr))
|
||||||
|
throw exception("KSharedMemory permission updated with a non-page-aligned address: 0x{:X}", fmt::ptr(ptr));
|
||||||
|
|
||||||
|
if (guest.Valid()) {
|
||||||
|
if (mprotect(ptr, size, permission.Get()) < 0)
|
||||||
throw exception("An error occurred while updating shared memory's permissions in guest");
|
throw exception("An error occurred while updating shared memory's permissions in guest");
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
BlockDescriptor block{
|
.ptr = ptr,
|
||||||
.address = address,
|
|
||||||
.size = size,
|
.size = size,
|
||||||
.permission = permission,
|
.permission = permission,
|
||||||
};
|
.state = initialState,
|
||||||
MemoryManager::InsertBlock(chunk, block);
|
});
|
||||||
}
|
|
||||||
if (kernel.Valid() && host) {
|
|
||||||
if (mprotect(reinterpret_cast<void *>(kernel.address), kernel.size, permission.Get()) == reinterpret_cast<u64>(MAP_FAILED))
|
|
||||||
throw exception("An error occurred while remapping shared memory: {}", strerror(errno));
|
|
||||||
kernel.permission = permission;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
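One possible shape for the chunk tracking behind InsertChunk/GetChunk used throughout these paths; this is an assumption for illustration, not the emulator's actual MemoryManager. An ordered map keyed by the chunk's base address makes address lookups a single bounded search.

#include <cstddef>
#include <cstdint>
#include <map>
#include <optional>

struct Chunk {
    uint8_t *ptr;
    size_t size;
    int permission;
    int state;
};

class ChunkMap {
  public:
    void Insert(const Chunk &chunk) {
        chunks[chunk.ptr] = chunk; // Later insertions at the same base replace earlier state
    }

    std::optional<Chunk> Get(uint8_t *ptr) {
        auto it{chunks.upper_bound(ptr)}; // First chunk starting strictly after ptr
        if (it == chunks.begin())
            return std::nullopt;
        --it; // The chunk that starts at or before ptr
        if (ptr < it->second.ptr + it->second.size)
            return it->second;
        return std::nullopt;
    }

  private:
    std::map<uint8_t *, Chunk> chunks;
};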
|
|
||||||
KSharedMemory::~KSharedMemory() {
|
KSharedMemory::~KSharedMemory() {
|
||||||
try {
|
|
||||||
if (guest.Valid() && state.process) {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = guest.address,
|
|
||||||
.x1 = guest.size,
|
|
||||||
.x8 = __NR_munmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
}
|
|
||||||
} catch (const std::exception &) {
|
|
||||||
}
|
|
||||||
if (kernel.Valid())
|
if (kernel.Valid())
|
||||||
munmap(reinterpret_cast<void *>(kernel.address), kernel.size);
|
munmap(kernel.ptr, kernel.size);
|
||||||
state.os->memory.DeleteChunk(guest.address);
|
|
||||||
|
if (guest.Valid()) {
|
||||||
|
munmap(guest.ptr, guest.size);
|
||||||
|
state.os->memory.InsertChunk(ChunkDescriptor{
|
||||||
|
.ptr = guest.ptr,
|
||||||
|
.size = guest.size,
|
||||||
|
.state = memory::states::Unmapped,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
close(fd);
|
close(fd);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -7,83 +7,36 @@
|
|||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
/**
|
/**
|
||||||
* @brief KSharedMemory is used to hold a particular amount of shared memory
|
* @brief KSharedMemory is used to retain two mappings of the same underlying memory, allowing persistence of the memory
|
||||||
*/
|
*/
|
||||||
class KSharedMemory : public KMemory {
|
class KSharedMemory : public KMemory {
|
||||||
private:
|
private:
|
||||||
int fd; //!< A file descriptor to the underlying shared memory
|
int fd; //!< A file descriptor to the underlying shared memory
|
||||||
memory::MemoryState initialState; //!< The initial state is stored for the Map call
|
memory::MemoryState initialState;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
struct MapInfo {
|
struct MapInfo {
|
||||||
u64 address;
|
u8 *ptr;
|
||||||
size_t size;
|
size_t size;
|
||||||
memory::Permission permission;
|
|
||||||
|
|
||||||
constexpr bool Valid() {
|
constexpr bool Valid() {
|
||||||
return address && size && permission.Get();
|
return ptr && size;
|
||||||
}
|
}
|
||||||
} kernel, guest;
|
} kernel, guest;
|
||||||
|
|
||||||
/**
|
KSharedMemory(const DeviceState &state, size_t size, memory::MemoryState memState = memory::states::SharedMemory, KType type = KType::KSharedMemory);
|
||||||
* @param address The address of the allocation on the kernel (If NULL then an arbitrary address is picked)
|
|
||||||
* @param size The size of the allocation on the kernel
|
|
||||||
* @param permission The permission of the kernel process
|
|
||||||
* @param memState The MemoryState of the chunk of memory
|
|
||||||
* @param mmapFlags Additional flags to pass to mmap
|
|
||||||
*/
|
|
||||||
KSharedMemory(const DeviceState &state, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::SharedMemory, int mmapFlags = 0, bool shared = false);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Maps the shared memory in the guest
|
* @param ptr The address to map to (If NULL an arbitrary address is picked; it may be outside of the HOS address space)
|
||||||
* @param address The address to map to (If NULL an arbitrary address is picked)
|
|
||||||
* @param size The amount of shared memory to map
|
|
||||||
* @param permission The permission of the kernel process
|
|
||||||
* @return The address of the allocation
|
* @return The address of the allocation
|
||||||
*/
|
*/
|
||||||
u64 Map(u64 address, u64 size, memory::Permission permission);
|
u8 *Map(u8 *ptr, u64 size, memory::Permission permission);
|
||||||
|
|
||||||
/**
|
inline span<u8> Get() override {
|
||||||
* @brief Resize a chunk of memory as to change the size occupied by it
|
return span(guest.ptr, guest.size);
|
||||||
* @param size The new size of the memory
|
|
||||||
*/
|
|
||||||
virtual void Resize(size_t size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates the permissions of a block of mapped memory
|
|
||||||
* @param address The starting address to change the permissions at
|
|
||||||
* @param size The size of the partition to change the permissions of
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
* @param host Set the permissions for the kernel rather than the guest
|
|
||||||
*/
|
|
||||||
void UpdatePermission(u64 address, u64 size, memory::Permission permission, bool host = false);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates the permissions of a block of mapped memory
|
|
||||||
* @param address The starting address to change the permissions at
|
|
||||||
* @param size The size of the partition to change the permissions of
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
*/
|
|
||||||
virtual void UpdatePermission(u64 address, u64 size, memory::Permission permission) {
|
|
||||||
UpdatePermission(address, size, permission, false);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
void UpdatePermission(u8* ptr, size_t size, memory::Permission permission) override;
|
||||||
* @brief Updates the permissions of a chunk of mapped memory
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
*/
|
|
||||||
inline virtual void UpdatePermission(memory::Permission permission) {
|
|
||||||
UpdatePermission(guest.address, guest.size, permission, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Checks if the specified address is within the guest memory object
|
|
||||||
* @param address The address to check
|
|
||||||
* @return If the address is inside the guest memory object
|
|
||||||
*/
|
|
||||||
inline virtual bool IsInside(u64 address) {
|
|
||||||
return (guest.address <= address) && ((guest.address + guest.size) > address);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief The destructor of shared memory; it deallocates the memory from all processes
|
* @brief The destructor of shared memory; it deallocates the memory from all processes
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
#include "KProcess.h"
|
#include "KProcess.h"
|
||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
KThread::KThread(const DeviceState &state, KHandle handle, pid_t selfTid, u64 entryPoint, u64 entryArg, u64 stackTop, u64 tls, i8 priority, KProcess *parent, const std::shared_ptr<type::KSharedMemory> &tlsMemory) : handle(handle), tid(selfTid), entryPoint(entryPoint), entryArg(entryArg), stackTop(stackTop), tls(tls), priority(priority), parent(parent), ctxMemory(tlsMemory), KSyncObject(state,
|
KThread::KThread(const DeviceState &state, KHandle handle, pid_t selfTid, u64 entryPoint, u64 entryArg, u64 stackTop, u8* tls, i8 priority, KProcess *parent, const std::shared_ptr<type::KSharedMemory> &tlsMemory) : handle(handle), tid(selfTid), entryPoint(entryPoint), entryArg(entryArg), stackTop(stackTop), tls(tls), priority(priority), parent(parent), ctxMemory(tlsMemory), KSyncObject(state,
|
||||||
KType::KThread) {
|
KType::KThread) {
|
||||||
UpdatePriority(priority);
|
UpdatePriority(priority);
|
||||||
}
|
}
|
||||||
|
@ -52,7 +52,7 @@ namespace skyline::kernel::type {
|
|||||||
KHandle handle; //!< The handle of the object in the handle table
|
KHandle handle; //!< The handle of the object in the handle table
|
||||||
pid_t tid; //!< The Linux Thread ID of the current thread
|
pid_t tid; //!< The Linux Thread ID of the current thread
|
||||||
u64 stackTop; //!< The top of the stack (Where it starts growing downwards from)
|
u64 stackTop; //!< The top of the stack (Where it starts growing downwards from)
|
||||||
u64 tls; //!< The address of TLS (Thread Local Storage) slot assigned to the current thread
|
u8* tls; //!< The address of TLS (Thread Local Storage) slot assigned to the current thread
|
||||||
i8 priority; //!< The priority of a thread in Nintendo format
|
i8 priority; //!< The priority of a thread in Nintendo format
|
||||||
|
|
||||||
Priority androidPriority{19, -8}; //!< The range of priorities for Android
|
Priority androidPriority{19, -8}; //!< The range of priorities for Android
|
||||||
@ -69,7 +69,7 @@ namespace skyline::kernel::type {
|
|||||||
* @param parent The parent process of this thread
|
* @param parent The parent process of this thread
|
||||||
* @param tlsMemory The KSharedMemory object for TLS memory allocated by the guest process
|
* @param tlsMemory The KSharedMemory object for TLS memory allocated by the guest process
|
||||||
*/
|
*/
|
||||||
KThread(const DeviceState &state, KHandle handle, pid_t selfTid, u64 entryPoint, u64 entryArg, u64 stackTop, u64 tls, i8 priority, KProcess *parent, const std::shared_ptr<type::KSharedMemory> &tlsMemory);
|
KThread(const DeviceState &state, KHandle handle, pid_t selfTid, u64 entryPoint, u64 entryArg, u64 stackTop, u8* tls, i8 priority, KProcess *parent, const std::shared_ptr<type::KSharedMemory> &tlsMemory);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief Kills the thread and deallocates the memory allocated for stack.
|
* @brief Kills the thread and deallocates the memory allocated for stack.
|
||||||
|
@ -8,216 +8,8 @@
|
|||||||
#include "KTransferMemory.h"
|
#include "KTransferMemory.h"
|
||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
KTransferMemory::KTransferMemory(const DeviceState &state, bool host, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState) : host(host), size(size), KMemory(state, KType::KTransferMemory) {
|
KTransferMemory::KTransferMemory(const DeviceState &state, u8* ptr, size_t size, memory::Permission permission, memory::MemoryState memState) : KSharedMemory(state, size, memState, KType::KTransferMemory) {
|
||||||
if (address && !util::PageAligned(address))
|
std::memcpy(kernel.ptr, ptr, size);
|
||||||
throw exception("KTransferMemory was created with non-page-aligned address: 0x{:X}", address);
|
Map(ptr, size, permission);
|
||||||
|
|
||||||
BlockDescriptor block{
|
|
||||||
.size = size,
|
|
||||||
.permission = permission,
|
|
||||||
};
|
|
||||||
ChunkDescriptor chunk{
|
|
||||||
.size = size,
|
|
||||||
.state = memState,
|
|
||||||
.blockList = {block},
|
|
||||||
};
|
|
||||||
|
|
||||||
if (host) {
|
|
||||||
address = reinterpret_cast<u64>(mmap(reinterpret_cast<void *>(address), size, permission.Get(), MAP_ANONYMOUS | MAP_PRIVATE | ((address) ? MAP_FIXED : 0), -1, 0));
|
|
||||||
if (reinterpret_cast<void *>(address) == MAP_FAILED)
|
|
||||||
throw exception("An error occurred while mapping transfer memory in host");
|
|
||||||
|
|
||||||
this->address = address;
|
|
||||||
chunk.address = address;
|
|
||||||
chunk.blockList.front().address = address;
|
|
||||||
hostChunk = chunk;
|
|
||||||
} else {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64 >(permission.Get()),
|
|
||||||
.x3 = static_cast<u64>(MAP_ANONYMOUS | MAP_PRIVATE | ((address) ? MAP_FIXED : 0)),
|
|
||||||
.x4 = static_cast<u64>(-1),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while mapping shared region in child process");
|
|
||||||
|
|
||||||
this->address = fregs.x0;
|
|
||||||
chunk.address = fregs.x0;
|
|
||||||
chunk.blockList.front().address = fregs.x0;
|
|
||||||
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 KTransferMemory::Transfer(bool mHost, u64 nAddress, u64 nSize) {
|
|
||||||
if (nAddress && !util::PageAligned(nAddress))
|
|
||||||
throw exception("KTransferMemory was transferred to a non-page-aligned address: 0x{:X}", nAddress);
|
|
||||||
|
|
||||||
nSize = nSize ? nSize : size;
|
|
||||||
|
|
||||||
auto chunk{host ? hostChunk : *state.os->memory.GetChunk(address)};
|
|
||||||
chunk.address = nAddress;
|
|
||||||
chunk.size = nSize;
|
|
||||||
MemoryManager::ResizeChunk(&chunk, nSize);
|
|
||||||
|
|
||||||
for (auto &block : chunk.blockList) {
|
|
||||||
block.address = nAddress + (block.address - address);
|
|
||||||
|
|
||||||
if ((mHost && !host) || (!mHost && !host)) {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = block.address,
|
|
||||||
.x1 = block.size,
|
|
||||||
.x2 = (block.permission.w) ? static_cast<u64>(block.permission.Get()) : (PROT_READ | PROT_WRITE),
|
|
||||||
.x3 = static_cast<u64>(MAP_ANONYMOUS | MAP_PRIVATE | ((nAddress) ? MAP_FIXED : 0)),
|
|
||||||
.x4 = static_cast<u64>(-1),
|
|
||||||
.x8 = __NR_mmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while mapping transfer memory in child process");
|
|
||||||
|
|
||||||
nAddress = fregs.x0;
|
|
||||||
} else if ((!mHost && host) || (mHost && host)) {
|
|
||||||
nAddress = reinterpret_cast<u64>(mmap(reinterpret_cast<void *>(block.address), block.size, block.permission.Get(), MAP_ANONYMOUS | MAP_PRIVATE | ((nAddress) ? MAP_FIXED : 0), -1, 0));
|
|
||||||
if (reinterpret_cast<void *>(nAddress) == MAP_FAILED)
|
|
||||||
throw exception("An error occurred while mapping transfer memory in host");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (block.permission.r) {
|
|
||||||
if (mHost && !host)
|
|
||||||
state.process->ReadMemory(reinterpret_cast<void *>(nAddress), address, block.size);
|
|
||||||
else if (!mHost && host)
|
|
||||||
state.process->WriteMemory(reinterpret_cast<void *>(address), nAddress, block.size);
|
|
||||||
else if (!mHost && !host)
|
|
||||||
state.process->CopyMemory(address, nAddress, block.size);
|
|
||||||
else if (mHost && host)
|
|
||||||
std::memcpy(reinterpret_cast<void *>(nAddress), reinterpret_cast<void *>(address), block.size);
|
|
||||||
}
|
|
||||||
if (!block.permission.w) {
|
|
||||||
if (mHost) {
|
|
||||||
if (mprotect(reinterpret_cast<void *>(block.address), block.size, block.permission.Get()) == reinterpret_cast<u64>(MAP_FAILED))
|
|
||||||
throw exception("An error occurred while remapping transfer memory: {}", strerror(errno));
|
|
||||||
} else {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = block.address,
|
|
||||||
.x1 = block.size,
|
|
||||||
.x2 = static_cast<u64>(block.permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while updating transfer memory's permissions in guest");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mHost && !host) {
|
|
||||||
state.os->memory.DeleteChunk(address);
|
|
||||||
hostChunk = chunk;
|
|
||||||
} else if (!mHost && host) {
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
|
||||||
} else if (mHost && host) {
|
|
||||||
hostChunk = chunk;
|
|
||||||
} else if (!mHost && !host) {
|
|
||||||
state.os->memory.DeleteChunk(address);
|
|
||||||
state.os->memory.InsertChunk(chunk);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ((mHost && !host) || (!mHost && !host)) {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x8 = __NR_munmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while unmapping transfer memory in child process");
|
|
||||||
} else if ((!mHost && host) || (mHost && host)) {
|
|
||||||
if (reinterpret_cast<void *>(munmap(reinterpret_cast<void *>(address), size)) == MAP_FAILED)
|
|
||||||
throw exception("An error occurred while unmapping transfer memory in host: {}");
|
|
||||||
}
|
|
||||||
|
|
||||||
host = mHost;
|
|
||||||
address = nAddress;
|
|
||||||
size = nSize;
|
|
||||||
return address;
|
|
||||||
}
|
|
||||||
|
|
||||||
void KTransferMemory::Resize(size_t nSize) {
|
|
||||||
if (host) {
|
|
||||||
if (mremap(reinterpret_cast<void *>(address), size, nSize, 0) == MAP_FAILED)
|
|
||||||
throw exception("An error occurred while remapping transfer memory in host: {}", strerror(errno));
|
|
||||||
} else {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = nSize,
|
|
||||||
.x8 = __NR_mremap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while remapping transfer memory in guest");
|
|
||||||
|
|
||||||
size = nSize;
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
MemoryManager::ResizeChunk(chunk, size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void KTransferMemory::UpdatePermission(u64 address, u64 size, memory::Permission permission) {
|
|
||||||
BlockDescriptor block{
|
|
||||||
.address = address,
|
|
||||||
.size = size,
|
|
||||||
.permission = permission,
|
|
||||||
};
|
|
||||||
|
|
||||||
if (host) {
|
|
||||||
if (mprotect(reinterpret_cast<void *>(address), size, permission.Get()) == reinterpret_cast<u64>(MAP_FAILED))
|
|
||||||
throw exception("An occurred while remapping transfer memory: {}", strerror(errno));
|
|
||||||
|
|
||||||
MemoryManager::InsertBlock(&hostChunk, block);
|
|
||||||
} else {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x2 = static_cast<u64>(permission.Get()),
|
|
||||||
.x8 = __NR_mprotect,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
if (fregs.x0 < 0)
|
|
||||||
throw exception("An error occurred while updating transfer memory's permissions in guest");
|
|
||||||
|
|
||||||
auto chunk{state.os->memory.GetChunk(address)};
|
|
||||||
MemoryManager::InsertBlock(chunk, block);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
KTransferMemory::~KTransferMemory() {
|
|
||||||
if (host) {
|
|
||||||
munmap(reinterpret_cast<void *>(address), size);
|
|
||||||
} else if (state.process) {
|
|
||||||
try {
|
|
||||||
Registers fregs{
|
|
||||||
.x0 = address,
|
|
||||||
.x1 = size,
|
|
||||||
.x8 = __NR_munmap,
|
|
||||||
};
|
|
||||||
|
|
||||||
state.nce->ExecuteFunction(ThreadCall::Syscall, fregs);
|
|
||||||
|
|
||||||
state.os->memory.DeleteChunk(address);
|
|
||||||
} catch (const std::exception &) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
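The new constructor above boils down to "snapshot the source bytes, then present them through a shared-memory mapping at the original guest address". A hedged sketch of that pattern (CreateTransferMemory is a hypothetical helper; error handling is minimal):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>
#include <android/sharedmem.h>

uint8_t *CreateTransferMemory(uint8_t *ptr, size_t size, int prot) {
    int fd{ASharedMemory_create("KTransferMemory", size)};
    if (fd < 0)
        return nullptr;

    // The kernel-side view keeps the contents reachable regardless of guest mappings
    auto *kernel{static_cast<uint8_t *>(mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0))};
    if (kernel == MAP_FAILED)
        return nullptr;
    std::memcpy(kernel, ptr, size); // Snapshot the source region

    // Present the same backing at the original guest address with the requested protection
    auto *guest{static_cast<uint8_t *>(mmap(ptr, size, prot, MAP_SHARED | MAP_FIXED, fd, 0))};
    return guest == MAP_FAILED ? nullptr : guest;
}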
|
@ -7,70 +7,10 @@
|
|||||||
|
|
||||||
namespace skyline::kernel::type {
|
namespace skyline::kernel::type {
|
||||||
/**
|
/**
|
||||||
* @brief KTransferMemory is used to hold a particular amount of transferable memory
|
* @brief KTransferMemory is used to transfer memory from one application to another on HOS; we emulate this abstraction using KSharedMemory, as it's functionally indistinguishable to the guest and allows access from the kernel regardless of whether it's mapped on the guest
|
||||||
*/
|
*/
|
||||||
class KTransferMemory : public KMemory {
|
class KTransferMemory : public KSharedMemory {
|
||||||
private:
|
|
||||||
ChunkDescriptor hostChunk{};
|
|
||||||
public:
|
public:
|
||||||
bool host; //!< If the memory is mapped on the host or the guest
|
KTransferMemory(const DeviceState &state, u8 *ptr, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::TransferMemory);
|
||||||
u64 address; //!< The current address of the allocated memory for the kernel
|
|
||||||
size_t size; //!< The current size of the allocated memory
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param host If to map the memory on host or guest
|
|
||||||
* @param address The address to map to (If NULL an arbitrary address is picked)
|
|
||||||
* @param size The size of the allocation
|
|
||||||
* @param permission The permissions of the memory
|
|
||||||
* @param type The type of the memory
|
|
||||||
* @param memState The MemoryState of the chunk of memory
|
|
||||||
*/
|
|
||||||
KTransferMemory(const DeviceState &state, bool host, u64 address, size_t size, memory::Permission permission, memory::MemoryState memState = memory::states::TransferMemory);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Transfers this piece of memory to another process
|
|
||||||
* @param host If to transfer memory to host or guest
|
|
||||||
* @param address The address to map to (If NULL an arbitrary address is picked)
|
|
||||||
* @param size The amount of shared memory to map
|
|
||||||
* @return The address of the allocation
|
|
||||||
*/
|
|
||||||
u64 Transfer(bool host, u64 address, u64 size = 0);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Remap a chunk of memory as to change the size occupied by it
|
|
||||||
* @param size The new size of the memory
|
|
||||||
* @return The address the memory was remapped to
|
|
||||||
*/
|
|
||||||
virtual void Resize(size_t size);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates the permissions of a block of mapped memory
|
|
||||||
* @param address The starting address to change the permissions at
|
|
||||||
* @param size The size of the partition to change the permissions of
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
*/
|
|
||||||
virtual void UpdatePermission(u64 address, u64 size, memory::Permission permission);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Updates the permissions of a chunk of mapped memory
|
|
||||||
* @param permission The new permissions to be set for the memory
|
|
||||||
*/
|
|
||||||
inline virtual void UpdatePermission(memory::Permission permission) {
|
|
||||||
UpdatePermission(address, size, permission);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief Checks if the specified address is within the memory object
|
|
||||||
* @param address The address to check
|
|
||||||
* @return If the address is inside the memory object
|
|
||||||
*/
|
|
||||||
inline virtual bool IsInside(u64 address) {
|
|
||||||
return (this->address <= address) && ((this->address + this->size) > address);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @brief The destructor of private memory, it deallocates the memory
|
|
||||||
*/
|
|
||||||
~KTransferMemory();
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
@@ -9,7 +9,7 @@
 namespace skyline::loader {
     Loader::ExecutableLoadInfo Loader::LoadExecutable(const std::shared_ptr<kernel::type::KProcess> process, const DeviceState &state, Executable &executable, size_t offset) {
-        u64 base{constant::BaseAddress + offset};
+        u8* base{reinterpret_cast<u8*>(constant::BaseAddress + offset)};

         u64 textSize{executable.text.contents.size()};
         u64 roSize{executable.ro.contents.size()};
@@ -23,7 +23,7 @@ namespace skyline::loader {
         // The data section will always be the last section in memory, so put the patch section after it
         u64 patchOffset{executable.data.offset + dataSize};
-        std::vector<u32> patch = state.nce->PatchCode(executable.text.contents, base, patchOffset);
+        std::vector<u32> patch = state.nce->PatchCode(executable.text.contents, reinterpret_cast<u64>(base), patchOffset);

         u64 patchSize{patch.size() * sizeof(u32)};
         u64 padding{util::AlignUp(patchSize, PAGE_SIZE) - patchSize};
@@ -40,11 +40,11 @@ namespace skyline::loader {
         process->NewHandle<kernel::type::KPrivateMemory>(base + patchOffset, patchSize + padding, memory::Permission{true, true, true}, memory::states::CodeMutable); // RWX
         state.logger->Debug("Successfully mapped section .patch @ 0x{0:X}, Size = 0x{1:X}", base + patchOffset, patchSize + padding);

-        process->WriteMemory(executable.text.contents.data(), base + executable.text.offset, textSize);
-        process->WriteMemory(executable.ro.contents.data(), base + executable.ro.offset, roSize);
-        process->WriteMemory(executable.data.contents.data(), base + executable.data.offset, dataSize - executable.bssSize);
-        process->WriteMemory(patch.data(), base + patchOffset, patchSize);
+        process->WriteMemory(executable.text.contents.data(), reinterpret_cast<u64>(base + executable.text.offset), textSize);
+        process->WriteMemory(executable.ro.contents.data(), reinterpret_cast<u64>(base + executable.ro.offset), roSize);
+        process->WriteMemory(executable.data.contents.data(), reinterpret_cast<u64>(base + executable.data.offset), dataSize - executable.bssSize);
+        process->WriteMemory(patch.data(), reinterpret_cast<u64>(base + patchOffset), patchSize);

-        return {base, patchOffset + patchSize + padding};
+        return {reinterpret_cast<u64>(base), patchOffset + patchSize + padding};
     }
 }
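The hunks above change the loader's load base from a u64 to a u8 *, so section placement becomes plain pointer arithmetic and an integer cast only happens at interfaces that still take a raw u64 address. A minimal sketch of that pattern, assuming a hypothetical WriteMemory(src, destAddress, size) stand-in rather than Skyline's actual API:

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using u8 = uint8_t;
using u64 = uint64_t;

// Hypothetical stand-in for an interface that still takes an integer address
void WriteMemory(const void *src, u64 destAddress, size_t size) {
    std::printf("write 0x%zX bytes to 0x%" PRIX64 "\n", size, destAddress);
    (void)src;
}

void MapSection(u8 *base, size_t offset, const void *contents, size_t size) {
    u8 *section{base + offset};                                  // pointer arithmetic on the base
    WriteMemory(contents, reinterpret_cast<u64>(section), size); // cast to u64 only at the boundary
}

int main() {
    static u8 fakeBase[0x1000]{};                     // stands in for the mapped load base
    const char text[]{"example section contents"};
    MapSection(fakeBase, 0x100, text, sizeof(text));
}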
@@ -21,7 +21,7 @@ namespace skyline {
         state.jvm->AttachThread();
         try {
             state.thread = state.process->threads.at(thread);
-            state.ctx = reinterpret_cast<ThreadContext *>(state.thread->ctxMemory->kernel.address);
+            state.ctx = reinterpret_cast<ThreadContext *>(state.thread->ctxMemory->kernel.ptr);

             while (true) {
                 asm("yield");
@@ -133,7 +133,7 @@
     }

     void NCE::ExecuteFunction(ThreadCall call, Registers &funcRegs, std::shared_ptr<kernel::type::KThread> &thread) {
-        ExecuteFunctionCtx(call, funcRegs, reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address));
+        ExecuteFunctionCtx(call, funcRegs, reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.ptr));
     }

     void NCE::ExecuteFunction(ThreadCall call, Registers &funcRegs) {
@@ -141,19 +141,19 @@
             throw exception("Executing function on Exiting process");

         auto thread{state.thread ? state.thread : state.process->threads.at(state.process->pid)};
-        ExecuteFunctionCtx(call, funcRegs, reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address));
+        ExecuteFunctionCtx(call, funcRegs, reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.ptr));
     }

     void NCE::WaitThreadInit(std::shared_ptr<kernel::type::KThread> &thread) __attribute__ ((optnone)) {
-        auto ctx{reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address)};
+        auto ctx{reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.ptr)};
         while (ctx->state == ThreadState::NotReady);
     }

     void NCE::StartThread(u64 entryArg, u32 handle, std::shared_ptr<kernel::type::KThread> &thread) {
-        auto ctx{reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.address)};
+        auto ctx{reinterpret_cast<ThreadContext *>(thread->ctxMemory->kernel.ptr)};
         while (ctx->state != ThreadState::WaitInit);

-        ctx->tpidrroEl0 = thread->tls;
+        ctx->tpidrroEl0 = reinterpret_cast<u64>(thread->tls);
         ctx->registers.x0 = entryArg;
         ctx->registers.x1 = handle;
         ctx->state = ThreadState::WaitRun;
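StartThread() and WaitThreadInit() above implement a simple spin-wait handshake on a ThreadContext living in shared memory: the parent waits for the guest thread to reach WaitInit, writes the entry argument, handle and TLS pointer, then flips the state to WaitRun (the optnone attribute keeps the compiler from hoisting the spin-loop load). A minimal single-process sketch of the same handshake, using std::atomic and std::thread instead of cross-process shared memory (names are illustrative):

#include <atomic>
#include <cstdint>
#include <thread>

enum class ThreadState : uint32_t { NotReady, WaitInit, WaitRun, Running };

struct ThreadContext {
    std::atomic<ThreadState> state{ThreadState::NotReady};
    uint64_t x0{}; // entry argument
    uint64_t x1{}; // handle
};

int main() {
    ThreadContext ctx;

    std::thread guest([&ctx] {
        ctx.state = ThreadState::WaitInit;          // signal: ready to receive arguments
        while (ctx.state != ThreadState::WaitRun);  // spin until released
        // ...a real guest would now jump to its entry point with ctx.x0/ctx.x1...
    });

    while (ctx.state != ThreadState::WaitInit);     // WaitThreadInit() equivalent
    ctx.x0 = 0xDEAD;                                // entryArg
    ctx.x1 = 42;                                    // handle
    ctx.state = ThreadState::WaitRun;               // StartThread() releases the guest thread

    guest.join();
}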
@@ -40,21 +40,21 @@ namespace skyline::kernel {
     }

     std::shared_ptr<type::KProcess> OS::CreateProcess(u64 entry, u64 argument, size_t stackSize) {
-        auto stack{std::make_shared<type::KSharedMemory>(state, memory.stack.address, stackSize, memory::Permission{true, true, false}, memory::states::Stack, MAP_NORESERVE | MAP_STACK, true)};
+        auto stack{std::make_shared<type::KSharedMemory>(state, stackSize, memory::states::Stack)};
         stack->guest = stack->kernel;

-        if (mprotect(reinterpret_cast<void *>(stack->guest.address), PAGE_SIZE, PROT_NONE))
+        if (mprotect(stack->guest.ptr, PAGE_SIZE, PROT_NONE))
             throw exception("Failed to create guard pages");

-        auto tlsMem{std::make_shared<type::KSharedMemory>(state, 0, (sizeof(ThreadContext) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1), memory::Permission{true, true, false}, memory::states::Reserved)};
+        auto tlsMem{std::make_shared<type::KSharedMemory>(state, (sizeof(ThreadContext) + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1), memory::states::Reserved)};
         tlsMem->guest = tlsMem->kernel;

-        auto pid{clone(reinterpret_cast<int (*)(void *)>(&guest::GuestEntry), reinterpret_cast<void *>(stack->guest.address + stackSize), CLONE_FILES | CLONE_FS | CLONE_SETTLS | SIGCHLD, reinterpret_cast<void *>(entry), nullptr, reinterpret_cast<void *>(tlsMem->guest.address))};
+        auto pid{clone(reinterpret_cast<int (*)(void *)>(&guest::GuestEntry), stack->guest.ptr + stackSize, CLONE_FILES | CLONE_FS | CLONE_SETTLS | SIGCHLD, reinterpret_cast<void *>(entry), nullptr, tlsMem->guest.ptr)};
         if (pid == -1)
             throw exception("Call to clone() has failed: {}", strerror(errno));

         state.logger->Debug("Successfully created process with PID: {}", pid);
-        return std::make_shared<kernel::type::KProcess>(state, pid, argument, stack, tlsMem);
+        return std::make_shared<kernel::type::KProcess>(state, pid, entry, stack, tlsMem);
     }

     void OS::KillThread(pid_t pid) {
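CreateProcess() above allocates the guest stack, turns its lowest page into a PROT_NONE guard page so an overflow faults immediately, and then clone()s the guest with the top of that mapping as its stack pointer. A minimal stand-alone sketch of that sequence (plain mmap instead of KSharedMemory, no CLONE_SETTLS, illustrative sizes):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE // for clone() on glibc
#endif
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t StackSize{0x100000}; // 1 MiB, an arbitrary example size
constexpr size_t PageSize{0x1000};

static int ChildEntry(void *arg) {
    std::printf("child running, arg=%p\n", arg);
    return 0;
}

int main() {
    // MAP_STACK | MAP_NORESERVE mirrors the flags the old constructor call passed through
    void *mapping{mmap(nullptr, StackSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK | MAP_NORESERVE, -1, 0)};
    if (mapping == MAP_FAILED)
        return 1;
    auto *stack{static_cast<uint8_t *>(mapping)};

    // The lowest page becomes a guard page so a stack overflow faults instead of corrupting memory
    if (mprotect(stack, PageSize, PROT_NONE))
        return 1;

    // The stack grows downwards, so clone() receives the top of the mapping
    int pid{clone(&ChildEntry, stack + StackSize, CLONE_FILES | CLONE_FS | SIGCHLD, nullptr)};
    if (pid == -1)
        return 1;

    waitpid(pid, nullptr, 0);
    munmap(mapping, StackSize);
}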
@@ -27,7 +27,7 @@ namespace skyline::kernel {
     OS(std::shared_ptr<JvmManager> &jvmManager, std::shared_ptr<Logger> &logger, std::shared_ptr<Settings> &settings, const std::string &appFilesPath);

     /**
-     * @brief Execute a particular ROM file. This launches the main process and calls the NCE class to handle execution.
+     * @brief Execute a particular ROM file
      * @param romFd A FD to the ROM file to execute
      * @param romType The type of the ROM file
      */
@@ -17,28 +17,30 @@ namespace skyline::service::pl {
         size_t offset; //!< The offset of the font in shared memory
     };

-    std::array<FontEntry, 6> fontTable{{
+    std::array<FontEntry, 6> fontTable{
+        {
             {FontChineseSimplified, FontExtendedChineseSimplifiedLength},
             {FontChineseTraditional, FontChineseTraditionalLength},
             {FontExtendedChineseSimplified, FontExtendedChineseSimplifiedLength},
             {FontKorean, FontKoreanLength},
             {FontNintendoExtended, FontNintendoExtendedLength},
             {FontStandard, FontStandardLength}
-    }};
+        }
+    };

-    IPlatformServiceManager::IPlatformServiceManager(const DeviceState &state, ServiceManager &manager) : fontSharedMem(std::make_shared<kernel::type::KSharedMemory>(state, NULL, constant::FontSharedMemSize, memory::Permission{true, false, false})), BaseService(state, manager) {
+    IPlatformServiceManager::IPlatformServiceManager(const DeviceState &state, ServiceManager &manager) : fontSharedMem(std::make_shared<kernel::type::KSharedMemory>(state, constant::FontSharedMemSize)), BaseService(state, manager) {
         constexpr u32 SharedFontResult{0x7F9A0218}; //!< The decrypted magic for a single font in the shared font data
         constexpr u32 SharedFontMagic{0x36F81A1E}; //!< The encrypted magic for a single font in the shared font data
         constexpr u32 SharedFontKey{SharedFontMagic ^ SharedFontResult}; //!< The XOR key for encrypting the font size

-        auto pointer{reinterpret_cast<u32 *>(fontSharedMem->kernel.address)};
+        auto ptr{reinterpret_cast<u32 *>(fontSharedMem->kernel.ptr)};
         for (auto &font : fontTable) {
-            *pointer++ = SharedFontResult;
-            *pointer++ = font.length ^ SharedFontKey;
-            font.offset = reinterpret_cast<u64>(pointer) - fontSharedMem->kernel.address;
+            *ptr++ = SharedFontResult;
+            *ptr++ = font.length ^ SharedFontKey;
+            font.offset = reinterpret_cast<u64>(ptr) - reinterpret_cast<u64>(fontSharedMem->kernel.ptr);

-            std::memcpy(pointer, font.data, font.length);
-            pointer = reinterpret_cast<u32 *>(reinterpret_cast<u64>(pointer) + font.length);
+            std::memcpy(ptr, font.data, font.length);
+            ptr = reinterpret_cast<u32 *>(reinterpret_cast<u8 *>(ptr) + font.length);
         }
     }
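The constructor above packs each font into the shared memory block as a small frame: a 4-byte result magic, the 4-byte length XOR-obfuscated with SharedFontKey, then the raw font data, with the offset of the body recorded for later lookup. A minimal sketch of that framing against a plain byte buffer (the Font struct and PackFonts are illustrative, not Skyline's API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr uint32_t SharedFontResult{0x7F9A0218};
constexpr uint32_t SharedFontMagic{0x36F81A1E};
constexpr uint32_t SharedFontKey{SharedFontMagic ^ SharedFontResult};

struct Font {
    const uint8_t *data;
    uint32_t length;
    size_t offset; // filled in with the offset of the font body inside the buffer
};

void PackFonts(std::vector<Font> &fonts, uint8_t *buffer) {
    auto *ptr{reinterpret_cast<uint32_t *>(buffer)};
    for (auto &font : fonts) {
        *ptr++ = SharedFontResult;            // decrypted magic the guest checks for
        *ptr++ = font.length ^ SharedFontKey; // the length is stored XOR-encrypted
        font.offset = static_cast<size_t>(reinterpret_cast<uint8_t *>(ptr) - buffer);
        std::memcpy(ptr, font.data, font.length);
        // Advance in bytes, not u32 elements, since font lengths need not be 4-byte multiples
        ptr = reinterpret_cast<uint32_t *>(reinterpret_cast<uint8_t *>(ptr) + font.length);
    }
}

int main() {
    static const uint8_t dummy[]{0xAA, 0xBB, 0xCC, 0xDD};
    std::vector<Font> fonts{{dummy, sizeof(dummy), 0}};
    std::vector<uint8_t> shmem(0x100);
    PackFonts(fonts, shmem.data());
    return fonts[0].offset == 8 ? 0 : 1; // the body starts right after the two u32 header words
}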