a priori it's not directly doable...
the guy who codes our editor (in C#, with a few bits in managed and native C++) was saying that a priori you can only bind native __stdcall functions to a C# delegate.
but it can be handy to avoid having to store, on the C# side, a pointer to the object that gets passed as the first argument to a native stub function, which then casts it and calls the right method on the object... since that means hand-wrapping every method you want to make callable. (ok, you can simplify that with macros, but it's still particularly tedious and ugly.)
behold... a solution for calling non-static native methods from a plain C# delegate, while only storing the delegate itself

in case it's ever useful to someone... ^^
(maybe there actually is a way to do this without cheating and going this low-level, but our C# specialist says there isn't, so I didn't bother checking... besides, I did it "for sport", it was fun)
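
for reference, here's roughly the hand-written wrapper pattern this replaces (hypothetical names, just to show the boilerplate): one exported __stdcall stub per method, a cast, and the C# side keeping the object pointer around to pass it back on every call:

// hypothetical example of the manual wrapping described above: one exported
// __stdcall stub per method, with the instance pointer kept on the C# side
// and passed back explicitly on every call.
class CFoo
{
public:
    void DoStuff(int value);
};

extern "C" __declspec(dllexport) void __stdcall Foo_DoStuff(void *instance, int value)
{
    static_cast<CFoo*>(instance)->DoStuff(value); // cast + forward, repeated for every bound method
}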

header:
// <header>
#ifndef __NEDMEMBERDELEGATE_H__
#define __NEDMEMBERDELEGATE_H__
//----------------------------------------------------------------------------
class HH_NATIVE_EDITOR_EXPORT CMemberDelegatePool
{
private:
struct SDelegatePage
{
void *m_Code;
void *m_Data;
hh_u32 m_AvailableSlots;
hh_u32 m_AvailableSlotSearchStart;
SDelegatePage(void *codePtr, void *dataPtr);
};
hh_u32 m_PageSizeInBytes;
hh_u32 m_SlotCountInPage;
TArray<SDelegatePage> m_Pages;
bool _CreatePage(SDelegatePage &outPage) const;
void _DestroyPage(SDelegatePage &outPage) const;
bool _FindFreeSlot(const void *&codePtr, void *&dataPtr);
public:
CMemberDelegatePool();
~CMemberDelegatePool();
const void *CreateMemberCall(void *ptrThis, const void *ptrToCall, bool debugBreakOnEntry = false);
void DestroyMemberCall(const void *callPtr);
hh_u32 ActiveCount() const;
hh_u32 AvailableCount() const;
};
//----------------------------------------------------------------------------
#endif //__NEDMEMBERDELEGATE_H__
.cpp:
// <header>
#include "NEdPrecompiled.h"
#include "NEdMemberDelegate.h"
// when defined, this generates slightly slower stubs, but fits 495 delegates instead of 341 in a single code-page.
#define USE_COMPACT_FORM
// when defined (here, in debug builds only), this enables breakpoints on stub-entry
#ifdef _DEBUG
#define ENABLE_BREAK_ON_ENTRY
#endif
// if breakpoints are enabled, uncommenting this will break when calling EVERY delegate
//#define BREAK_ON_ALL_ENTRIES
//----------------------------------------------------------------------------
//
// Brief description of how this works.
//
// the C# delegate can only call a simple native __stdcall function pointer.
//
// Delegate d;
// d.DynamicInvoke(args);
//
// we want to call __thiscall native member functions on a specific object instance.
// instead of writing a separate __stdcall stub for each member we want to call, and having to
// manually pass the instance to the function, the idea is to assemble a dynamic stub,
// with the 'this' pointer embedded in the bytecode, and make the C# delegate jump to that address,
// as if it were a __stdcall function.
// the dynamic bytecode will then move the hardcoded 'this' pointer into ecx, and jump to the
// real member function address.
//
// basically, the member delegate is an executable buffer containing the following asm code:
//
// mov ecx, 0xTHISPOINTER ; 5 bytes
// mov eax, 0xJUMPTARGET ; 5 bytes
// jmp eax ; 2 bytes
//
// this can take several forms.
// the most compact one uses a relative jump; the jump offset is computed by subtracting
// the address of the byte right after the jump instruction from the jump target address. (eip gets incremented after decoding)
//
// mov ecx, 0xTHISPOINTER ; 5 bytes
// jmp 0xRELATIVEJUMPOFFSET ; 5 bytes
//
// the thing is, we can only set execution rights for a whole page.
// this has the nasty side effect of requiring a 4 KB page for each delegate created.
// so instead we can batch multiple delegates into the same page:
//
// Code page
// .-----------------------,-----------------------,-----------------------,---------
// | mov ecx, 0xTHISPTR_0  | mov ecx, 0xTHISPTR_1  | mov ecx, 0xTHISPTR_2  |
// | jmp 0xRELJUMPOFFSET_0 | jmp 0xRELJUMPOFFSET_1 | jmp 0xRELJUMPOFFSET_2 | etc...
// '-----------------------'-----------------------'-----------------------'---------
// 0                       10                      20                      30
//
// but we'll want to dynamically add or remove them.
// this means that we'll have to set access rights to write-execute, instead of execute only,
// and flush the instruction cache after each modification, to take into account the modified buffer.
// that's probably not a good idea. (we could change the access rights back and forth instead of using write-execute,
// but that's even worse, especially if threading is involved)
//
// this is solved by using two pages. a data page in read/write, containing both the 'this' pointer and the absolute jump target address,
// and a code page in execute only mode, that's already filled with pointer-independent stub code, that loads the pointers from
// the data page, moves one to ecx and then jumps to the other.
//
// the basic layout looks like:
//
// Code page
// .-----------------------,-----------------------,-----------------------,---------
// | mov eax, DataPage + 0 | mov eax, DataPage + 8 | mov eax, DataPage + 16|
// | mov ecx, [eax + 4]    | mov ecx, [eax + 4]    | mov ecx, [eax + 4]    |
// | mov eax, [eax]        | mov eax, [eax]        | mov eax, [eax]        | etc...
// | jmp eax               | jmp eax               | jmp eax               |
// '-----------------------'-----------------------'-----------------------'---------
// 0                       12                      24                      36
//
// Data page
// .-----------------,-----------------,-----------------,-----------------,---------
// | 0xJUMPTARGET_0  | 0xJUMPTARGET_1  | 0xJUMPTARGET_2  | 0xJUMPTARGET_3  |
// | 0xTHISPTR_0     | 0xTHISPTR_1     | 0xTHISPTR_2     | 0xTHISPTR_3     | etc...
// '-----------------'-----------------'-----------------'-----------------'---------
// 0                 8                 16                24                32
//
// the code page is pre-filled with the pointer-independent code when it is first created,
// and when setting a new delegate, only the data page is modified, so there is no need
// to flush the instruction cache.
//
// the bytecode takes 12 bytes per delegate. this gives us 341 delegates per 4K page.
// the theoretical maximum will be 512 delegates per page, as we are limited by the
// data page, having to store 8 bytes of pointer data per delegate.
//
// we can improve this by factoring out common instructions into a single location.
// the basic idea is to have this:
//
// mov eax, DataPage + offset ; repeated as many times as there are delegates, with a different 'offset' each time
// jmp _CommonStubCode
// ...
// _CommonStubCode: ; only one instance needed for 'n' delegates, takes 7 bytes
// mov ecx, [eax + 4]
// mov eax, [eax]
// jmp eax
//
// the problem is that these two instructions:
//
// mov eax, DataPage + offset
// jmp _CommonStubCode
//
// already take 10 bytes, because the relative offset of the 'jmp' instruction won't fit in a single byte. this gives us 408 delegates (which is already better than 341,
// but produces entry points that are not 4-byte aligned, even though this is not necessarily dramatic).
// we can cram 573 delegates in there by using a slightly different approach:
//
// if we use a 1-byte encoded relative jump, and the following layout in the code page:
//
// Code page
// ,------.-------,------,------,------,-----,------,------,------,------,-------,------,-----
// | D___ | CSTUB | D-14 | D-21 | D-28 | ... | D+19 | D+12 | D+05 | D___ | CSTUB | D-14 | ...
// '------'-------'------'------'------'-----'------'------'------'------'-------'------'-----
// 0 5 12 19 26
// <-------------|------|------' '------|------|------>
// <-------------|------' '------|------>
// <-------------' '------>
//
// CSTUB is the common stub code (7 bytes):
// mov ecx, [eax + 4] ; 8B 48 04
// mov eax, [eax] ; 8B 00
// jmp eax ; FF E0
//
// D-OFFSET is the specific pointer-loader (7 bytes)
// mov eax, DataPage + slotId ; B8 __ __ __ __
// jmp OFFSET ; EB __
//
// D___ is a special version of the pointer-loader, that doesn't jump, as it is located right before the CSTUB (5 bytes)
// mov eax, DataPage + slotId ; B8 __ __ __ __
//
// the offsets for each chunk are:
// 7-byte stubs (unaligned)
// -14 -21 -28 -35 -42 -49 -56 -63 -70 -77 -84 -91 -98 -105 -112 -119 -126 +124 +117 +110 +103 +96 +89 +82 +75 +68 +61 +54 +47 +40 +33 +26 +19 +12 +5 D___ (35 + 1)
// 8-byte stubs
// -16 -24 -32 -40 -48 -56 -64 -72 -80 -88 -96 -104 -112 -120 -128 +120 +112 +104 +96 +88 +80 +72 +64 +56 +48 +40 +32 +24 +16 +8 D___ (30 + 1)
// 8-byte stubs (unaligned)
// -16 -24 -32 -40 -48 -56 -64 -72 -80 -88 -96 -104 -112 -120 -128 +125 +117 +109 +101 +93 +85 +77 +69 +61 +53 +45 +37 +29 +21 +13 +5 D___ (31 + 1)
//
// there are 12 fixed bytes (5 + 7) at the beginning of the page.
//
// chunksize = n * 7 + 5 + 7
// chunkcount = (4096 - 12) / chunksize
// lastchunksize = (4096 - 12) - chunkcount * chunksize
// lastchunkDcount = (lastchunksize - 5 - 7) / 7
// totalcount = 1 + chunkcount * (n + 1) + lastchunkDcount + 1
//
// n in [0, 35]
//
// n = 35, totalcount = 573
// n = 30, totalcount = 570
// n = 25, totalcount = 568
//
// that's cool, but useless, as we won't be able to use more than 512 delegates, due to the data page layout.
// by padding the stubs to 8 bytes instead of 7, we'll have:
//
// 8 fixed bytes at the beginning
// 31 * 8 + 8 = 256 bytes
// 15 full chunks + 1 partial chunk of 248 bytes
// partial chunk has (248-8)/8 = 30 delegates
// total = 15 * 31 + 30 = 495 delegates
//
//----------------------------------------------------------------------------
#if !defined(ENABLE_BREAK_ON_ENTRY) && defined(BREAK_ON_ALL_ENTRIES)
#undef BREAK_ON_ALL_ENTRIES
#endif
//----------------------------------------------------------------------------
namespace Internal
{
static const hh_u32 kDataSize = 4 * 2; // two int32 pointers; if we switch to 64 bits, we'll have to change the bytecode too anyway...
static const hh_u32 kDataAlignment = 4;
static const hh_u32 kAlignedDataSize = (kDataSize + kDataAlignment - 1) & (~(kDataAlignment - 1));
HH_STATIC_ASSERT(kAlignedDataSize == 8);
#ifndef USE_COMPACT_FORM
static const hh_u32 kRealCodeSize = 12; // 5 + 3 + 2 + 2 (mov eax, imm32 ; mov ecx, [eax+4] ; mov eax, [eax] ; jmp eax)
static const hh_u32 kInstructionAlignment = 4;
static const hh_u32 kAlignedCodeSize = (kRealCodeSize + kInstructionAlignment - 1) & (~(kInstructionAlignment - 1));
HH_STATIC_ASSERT(kAlignedCodeSize == 12);
#else
static const hh_u32 kDelegatesPerChunk = 31;
static const hh_u32 kStubDelSize = 8;
static const hh_u32 kStubCstSize = 8;
static const hh_u32 kChunkSize = kDelegatesPerChunk * kStubDelSize + kStubCstSize;
HH_STATIC_ASSERT(kChunkSize == 256);
hh_u8 *_WriteCommonStub(hh_u8 *dst)
{
// mov ecx, [eax + 4] ; 8B 48 04
// mov eax, [eax] ; 8B 00
// jmp eax ; FF E0
// int3 ; CC (this should never be reached)
*reinterpret_cast<hh_u32*>(dst + 0) = 0x8B04488B;
*reinterpret_cast<hh_u32*>(dst + 4) = 0xCCE0FF00;
return dst + 8;
}
static const hh_i8 kRelJmpOffsets_Aligned8[30] =
{
-15, -23, -31, -39, -47, -55, -63, -71, -79, -87, -95, -103, -111, -119, -127,
+121, +113, +105, +97, +89, +81, +73, +65, +57, +49, +41, +33, +25, +15, +9
};
#endif
}
//----------------------------------------------------------------------------
//
// delegate pool container
//
//----------------------------------------------------------------------------
CMemberDelegatePool::CMemberDelegatePool()
: m_PageSizeInBytes(0)
, m_SlotCountInPage(0)
{
SYSTEM_INFO sysInfo;
::GetSystemInfo(&sysInfo);
m_PageSizeInBytes = sysInfo.dwPageSize;
HH_ASSERT(m_PageSizeInBytes > 32);
#ifdef USE_COMPACT_FORM
hh_u32 trimmedPageSize = m_PageSizeInBytes - ::Internal::kStubCstSize;
hh_u32 chunkCountInPage = trimmedPageSize / ::Internal::kChunkSize;
hh_u32 lastChunkSize = trimmedPageSize - chunkCountInPage * ::Internal::kChunkSize;
hh_u32 delegateCountInLastChunk = (lastChunkSize - ::Internal::kStubCstSize) / ::Internal::kStubDelSize;
hh_u32 stubCountInPage = chunkCountInPage * ::Internal::kDelegatesPerChunk + delegateCountInLastChunk;
#else
hh_u32 stubCountInPage = m_PageSizeInBytes / ::Internal::kAlignedCodeSize;
#endif
hh_u32 dataCountInPage = m_PageSizeInBytes / ::Internal::kDataSize;
m_SlotCountInPage = HHMin(stubCountInPage, dataCountInPage);
}
//----------------------------------------------------------------------------
CMemberDelegatePool::~CMemberDelegatePool()
{
for (hh_u32 i = 0; i < m_Pages.Count(); i++)
{
_DestroyPage(m_Pages[i]);
}
}
//----------------------------------------------------------------------------
CMemberDelegatePool::SDelegatePage::SDelegatePage(void *codePtr, void *dataPtr)
: m_Code(codePtr)
, m_Data(dataPtr)
, m_AvailableSlots(0)
, m_AvailableSlotSearchStart(0)
{
}
//----------------------------------------------------------------------------
bool CMemberDelegatePool::_CreatePage(SDelegatePage &outPage) const
{
outPage.m_Code = ::VirtualAlloc(null, m_PageSizeInBytes * 2, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); // will clear the allocated pages to 0
outPage.m_Data = Mem::AdvanceRawPointer(outPage.m_Code, m_PageSizeInBytes);
if (outPage.m_Code == null)
{
_DestroyPage(outPage);
return false;
}
outPage.m_AvailableSlots = m_SlotCountInPage;
// now, for each delegate slot, emit the instructions that will fetch the pointers from the data pages, and jump to the correct member function
HH_STATIC_ASSERT(sizeof(outPage.m_Data) == sizeof(hh_u32)); // the code below won't work otherwise
hh_u8 *codeWritePtr = static_cast<hh_u8*>(outPage.m_Code);
hh_u32 dataWritePtr = reinterpret_cast<hh_u32>(outPage.m_Data);
#ifdef USE_COMPACT_FORM
hh_u32 trimmedPageSize = m_PageSizeInBytes - ::Internal::kStubCstSize;
hh_u32 chunkCountInPage = trimmedPageSize / ::Internal::kChunkSize;
hh_u32 lastChunkSize = trimmedPageSize - chunkCountInPage * ::Internal::kChunkSize;
hh_u32 delegateCountInLastChunk = (lastChunkSize - ::Internal::kStubCstSize) / ::Internal::kStubDelSize;
const hh_i32 _offsetFixup[3] = { -1, -1, 30 - (delegateCountInLastChunk - 1) - 1 };
const hh_i32 *offsetFixup = _offsetFixup;
hh_u32 delegateCountInChunk = ::Internal::kDelegatesPerChunk;
codeWritePtr = ::Internal::_WriteCommonStub(codeWritePtr);
for (hh_u32 i = 0; i <= chunkCountInPage; i++)
{
if (i == chunkCountInPage)
{
// write partial chunk
delegateCountInChunk = delegateCountInLastChunk;
++offsetFixup;
}
codeWritePtr[0] = 0xB8;
for (hh_u32 i = 1; i < delegateCountInChunk; i++)
{
// mov eax, dataWritePtr ; B8 __ __ __ __
// jmp relOffset ; EB __
// int3 ; CC (this should never be reached)
*reinterpret_cast<hh_u32*>(codeWritePtr + 1) = dataWritePtr;
*reinterpret_cast<hh_u32*>(codeWritePtr + 5) = 0xB8CC00EB; // ... EB __ CC] [B8...
// the offset fixup is here for the last partial chunk, when we've passed the negative offsets, and are going into the positive ones,
// we must directly jump to smaller positive offsets, based on how many delegates we have to write.
// for instance, if we have 19 delegates into the last partial chunk, instead of the normal 30,
// after the 15th delegate (offset = -128), as we'll have only 4 delegates left, we must not write:
// ... -120, -128, +120, +112, +104, +96, D___, CSTUB
// but:
// ... -120, -128, +32, +24, +16, +8, D___, CSTUB
codeWritePtr[6] = ::Internal::kRelJmpOffsets_Aligned8[i + offsetFixup[i / 16]];
codeWritePtr += ::Internal::kStubDelSize;
dataWritePtr += ::Internal::kAlignedDataSize;
}
// write fall-through 'nop' chain (the last byte will be overwritten by the common stub)
// here, we use a dummy segment prefix with the lea instruction to produce a single 3-byte 'nop' instruction:
*reinterpret_cast<hh_u32*>(codeWritePtr + 5) = 0xCC1B8D3E; // lea ebx, ds:[ebx]; int3 ; 3E 8D 1B CC
*reinterpret_cast<hh_u32*>(codeWritePtr + 1) = dataWritePtr;
codeWritePtr += ::Internal::kStubDelSize;
dataWritePtr += ::Internal::kAlignedDataSize;
codeWritePtr = ::Internal::_WriteCommonStub(codeWritePtr);
}
#else
// mov eax, storagePtr : B8 __ __ __ __
// mov ecx, [eax + 4] : 8B 48 04
// mov eax, [eax] : 8B 00
// jmp eax : FF E0
codeWritePtr[0] = 0xB8;
for (hh_u32 i = 0; i < m_SlotCountInPage - 1; i++)
{
*reinterpret_cast<hh_u32*>(codeWritePtr + 1) = dataWritePtr;
*reinterpret_cast<hh_u32*>(codeWritePtr + 5) = 0x8B04488B;
*reinterpret_cast<hh_u32*>(codeWritePtr + 9) = 0xB8E0FF00;
codeWritePtr += ::Internal::kAlignedCodeSize;
dataWritePtr += ::Internal::kAlignedDataSize;
}
*reinterpret_cast<hh_u32*>(codeWritePtr + 1) = dataWritePtr;
*reinterpret_cast<hh_u32*>(codeWritePtr + 5) = 0x8B04488B;
codeWritePtr[ 9] = 0x00;
codeWritePtr[10] = 0xFF;
codeWritePtr[11] = 0xE0;
codeWritePtr += ::Internal::kAlignedCodeSize;
#endif
// if we've got some unused bytes at the end of the code page, set them to 'int3', we should never jump into them.
hh_u32 unusedBytes = (static_cast<hh_u8*>(outPage.m_Code) + m_PageSizeInBytes) - codeWritePtr;
if (unusedBytes != 0)
{
memset(codeWritePtr, 0xCC, unusedBytes);
}
// set execute-only access rights on the code page, we won't touch it anymore.
DWORD prevProtect;
if (::VirtualProtect(outPage.m_Code, m_PageSizeInBytes, PAGE_EXECUTE, &prevProtect) == FALSE)
{
// ouch.. painful failure, we won't be able to execute the code
HH_ASSERT_NOT_REACHED_MESSAGE("CMemberDelegatePool : VirtualProtect() failed to set execution access rights to the code-page");
}
return true;
}
//----------------------------------------------------------------------------
void CMemberDelegatePool::_DestroyPage(SDelegatePage &outPage) const
{
if (outPage.m_Code != null)
{
::VirtualFree(outPage.m_Code, 0, MEM_RELEASE);
}
outPage.m_Code = null;
outPage.m_Data = null;
}
//----------------------------------------------------------------------------
// if 'false' is returned, doesn't touch codePtr and dataPtr
bool CMemberDelegatePool::_FindFreeSlot(const void *&codePtr, void *&dataPtr)
{
for (hh_u32 i = 0; i < m_Pages.Count(); i++)
{
SDelegatePage &p = m_Pages[i];
if (p.m_AvailableSlots != 0)
{
hh_u32 *data32Ptr = static_cast<hh_u32*>(p.m_Data);
data32Ptr = Mem::AdvanceRawPointer(data32Ptr, ::Internal::kAlignedDataSize * p.m_AvailableSlotSearchStart);
for (hh_u32 i = p.m_AvailableSlotSearchStart; i < m_SlotCountInPage; i++)
{
if (data32Ptr[0] == 0)
{
#ifdef USE_COMPACT_FORM
hh_u32 subChunkId = i / ::Internal::kDelegatesPerChunk;
hh_u32 posInChunk = i - subChunkId * ::Internal::kDelegatesPerChunk;
codePtr = Mem::AdvanceRawPointer(p.m_Code, ::Internal::kStubCstSize + subChunkId * ::Internal::kChunkSize + posInChunk * ::Internal::kStubDelSize);
#else
codePtr = Mem::AdvanceRawPointer(p.m_Code, ::Internal::kAlignedCodeSize * i);
#endif
HH_ASSERT((const hh_u8*)codePtr >= (const hh_u8*)p.m_Code + 8);
dataPtr = data32Ptr;
p.m_AvailableSlots--;
p.m_AvailableSlotSearchStart = i + 1;
return true;
}
data32Ptr = Mem::AdvanceRawPointer(data32Ptr, ::Internal::kAlignedDataSize);
}
HH_ASSERT_NOT_REACHED(); // 'm_AvailableSlots' != 0, yet no free slot was found
}
}
if (m_Pages.PushBack(SDelegatePage(null, null)).Valid())
{
SDelegatePage &p = m_Pages.Last();
if (_CreatePage(p))
{
#ifdef USE_COMPACT_FORM
codePtr = static_cast<hh_u8*>(p.m_Code) + ::Internal::kStubCstSize;
#else
codePtr = p.m_Code;
#endif
dataPtr = p.m_Data;
p.m_AvailableSlots--;
p.m_AvailableSlotSearchStart = 1;
return true;
}
m_Pages.PopBackAndDiscard();
}
return false;
}
//----------------------------------------------------------------------------
const void *CMemberDelegatePool::CreateMemberCall(void *ptrThis, const void *ptrToCall, bool debugBreakOnEntry /*= false*/)
{
const void *codePtr = null;
void *dataPtr = null;
if (_FindFreeSlot(codePtr, dataPtr))
{
HH_ASSERT(codePtr != null && dataPtr != null);
hh_u32 *data32Ptr = static_cast<hh_u32*>(dataPtr);
data32Ptr[0] = hh_u32(ptrToCall); // keep this one first, it'll never be null, not like the 'this' ptr, and allows us to quickly search through the page without checking the 'this' pointer too.
data32Ptr[1] = hh_u32(ptrThis);
#ifdef ENABLE_BREAK_ON_ENTRY
#ifndef BREAK_ON_ALL_ENTRIES
if (debugBreakOnEntry)
#endif
codePtr = Mem::AdvanceRawPointer(codePtr, -1);
#endif
}
return codePtr;
}
//----------------------------------------------------------------------------
void CMemberDelegatePool::DestroyMemberCall(const void *callPtr)
{
#ifdef ENABLE_BREAK_ON_ENTRY
callPtr = Mem::AdvanceRawPointer(callPtr, +1);
#ifndef BREAK_ON_ALL_ENTRIES
callPtr = (const void*)(hh_ureg(callPtr) & (~hh_ureg(7)));
#endif
#endif
// we can directly know the page address by aligning 'callPtr' to the lower page boundary, and do a fast pointer compare within our page list
hh_ureg parentCodePage = hh_ureg(callPtr) & ~(m_PageSizeInBytes - 1);
for (hh_u32 i = 0; i < m_Pages.Count(); i++)
{
SDelegatePage &p = m_Pages[i];
if (p.m_Code == (void*)parentCodePage)
{
hh_ureg offset = hh_ureg(callPtr) - parentCodePage;
#ifdef USE_COMPACT_FORM
offset -= ::Internal::kStubCstSize;
hh_u32 subChunkId = offset / ::Internal::kChunkSize;
hh_u32 posInChunk = offset - subChunkId * ::Internal::kChunkSize;
HH_ASSERT(posInChunk % ::Internal::kStubDelSize == 0);
hh_ureg slotIndex = subChunkId * ::Internal::kDelegatesPerChunk + posInChunk / ::Internal::kStubDelSize;
#else
HH_ASSERT(offset % ::Internal::kAlignedCodeSize == 0);
hh_ureg slotIndex = offset / ::Internal::kAlignedCodeSize;
#endif
hh_u32 *dataPtr = reinterpret_cast<hh_u32*>(static_cast<hh_u8*>(p.m_Data) + slotIndex * ::Internal::kAlignedDataSize);
dataPtr[0] = 0;
dataPtr[1] = 0; // we don't need to clear this one, as we're only using the first element to determine if the slot is free, but do it just to be clean...
p.m_AvailableSlots++;
if (slotIndex < p.m_AvailableSlotSearchStart)
p.m_AvailableSlotSearchStart = slotIndex;
return;
}
}
}
//----------------------------------------------------------------------------
hh_u32 CMemberDelegatePool::ActiveCount() const
{
hh_u32 count = 0;
for (hh_u32 i = 0; i < m_Pages.Count(); i++)
{
HH_ASSERT(m_SlotCountInPage >= m_Pages[i].m_AvailableSlots);
count += m_SlotCountInPage - m_Pages[i].m_AvailableSlots;
}
return count;
}
//----------------------------------------------------------------------------
hh_u32 CMemberDelegatePool::AvailableCount() const
{
hh_u32 count = 0;
for (hh_u32 i = 0; i < m_Pages.Count(); i++)
{
count += m_Pages[i].m_AvailableSlots;
}
return count;
}
//----------------------------------------------------------------------------
the comment at the top of the .cpp should be self-explanatory

basically, CreateMemberCall() takes the pointer to the object (from the point of view of the method to be called), the address of the method, and an optional bool saying whether you want to break into the debugger when that method gets called (which can be handy)
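
for illustration, a minimal usage sketch of the native side (CSomeObject / OnClicked are made-up names; the member-function-pointer cast is the same trick used in SFunctionDefinition::Bind further down, and is fine for the simple single-inheritance case):

// hypothetical usage of the pool; CSomeObject / OnClicked are made-up names
class CSomeObject
{
public:
    void OnClicked(int value);
};

CMemberDelegatePool pool;
CSomeObject obj;

// extract the raw code address from the member-function pointer
void (CSomeObject::*method)(int) = &CSomeObject::OnClicked;
const void *entry = pool.CreateMemberCall(&obj, *reinterpret_cast<void**>(&method));

// 'entry' now behaves like a plain __stdcall function taking the same arguments as
// OnClicked (minus 'this'): hand it over to the C# side and wrap it with
// Marshal.GetDelegateForFunctionPointer.

// once the object (or the binding) goes away:
pool.DestroyMemberCall(entry);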

well, I'll leave the C# part as an exercise for the reader


for the binding and the type-safety on the C# side, all that's needed is a series of templated binding functions that grab the typeid of each parameter, push them into an array, and generate a C# declaration from that (see the sketch after the header below).
#ifndef __NEDNATIVEOBJECTBINDER_H__
#define __NEDNATIVEOBJECTBINDER_H__
#include <typeinfo>
//----------------------------------------------------------------------------
struct HH_NATIVE_EDITOR_EXPORT SFunctionDefinition
{
const char *Name;
const void *CodeEntry;
TArray<const char *> ArgumentTypes;
const char* ReturnType;
SFunctionDefinition() : Name(null), CodeEntry(null), ReturnType(null) {}
~SFunctionDefinition() { HH_ASSERT(Name == null && CodeEntry == null && ReturnType == null); }
bool _BindImpl(const char *name, void *thisPtr, const void *codePtr, bool debugBreakOnEntry);
template<typename _TC, typename _TR>
bool Bind(const char *name, _TC *thisPtr, _TR (_TC::*fn)(), bool debugBreakOnEntry)
{
ReturnType = typeid(_TR).name();
return _BindImpl(name, thisPtr, *reinterpret_cast<void**>(&fn), debugBreakOnEntry);
}
template<typename _TC, typename _TR, typename _T0>
bool Bind(const char *name, _TC *thisPtr, _TR (_TC::*fn)(_T0), bool debugBreakOnEntry)
{
ReturnType = typeid(_TR).name();
ArgumentTypes.PushBack(typeid(_T0).name());
return _BindImpl(name, thisPtr, *reinterpret_cast<void**>(&fn), debugBreakOnEntry);
}
template<typename _TC, typename _TR, typename _T0, typename _T1>
bool Bind(const char *name, _TC *thisPtr, _TR (_TC::*fn)(_T0, _T1), bool debugBreakOnEntry)
{
ReturnType = typeid(_TR).name();
ArgumentTypes.PushBack(typeid(_T0).name());
ArgumentTypes.PushBack(typeid(_T1).name());
return _BindImpl(name, thisPtr, *reinterpret_cast<void**>(&fn), debugBreakOnEntry);
}
template<typename _TC, typename _TR, typename _T0, typename _T1, typename _T2>
bool Bind(const char *name, _TC *thisPtr, _TR (_TC::*fn)(_T0, _T1, _T2), bool debugBreakOnEntry)
{
ReturnType = typeid(_TR).name();
ArgumentTypes.PushBack(typeid(_T0).name());
ArgumentTypes.PushBack(typeid(_T1).name());
ArgumentTypes.PushBack(typeid(_T2).name());
return _BindImpl(name, thisPtr, *reinterpret_cast<void**>(&fn), debugBreakOnEntry);
}
void Unbind();
};
//----------------------------------------------------------------------------
class HH_NATIVE_EDITOR_EXPORT CNativeObjectBinder
{
protected:
TArray<SFunctionDefinition> m_FunctionList;
public:
virtual ~CNativeObjectBinder()
{
for (hh_u32 i = 0; i < m_FunctionList.Count(); i++)
{
m_FunctionList[i].Unbind();
}
}
const TArray<SFunctionDefinition> &ListFunctions() { return m_FunctionList; }
template<typename _TC, typename _TR>
bool RawBind(const char *name, _TC *thisPtr, _TR fn, bool debugBreakOnEntry)
{
if (m_FunctionList.PushBack().Valid())
{
m_FunctionList.Last().Bind(name, thisPtr, fn, debugBreakOnEntry);
return true;
}
return false;
}
template<typename _TC, typename _TR>
bool Bind(const char *name, _TR (_TC::*fn)(), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(this), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0>
bool Bind(const char *name, _TR (_TC::*fn)(_T0), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(this), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0, typename _T1>
bool Bind(const char *name, _TR (_TC::*fn)(_T0, _T1), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(this), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0, typename _T1, typename _T2>
bool Bind(const char *name, _TR (_TC::*fn)(_T0, _T1, _T2), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(this), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR>
bool Bind(_TC *thisPtr, const char *name, _TR (_TC::*fn)(), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(thisPtr), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0>
bool Bind(_TC *thisPtr, const char *name, _TR (_TC::*fn)(_T0), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(thisPtr), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0, typename _T1>
bool Bind(_TC *thisPtr, const char *name, _TR (_TC::*fn)(_T0, _T1), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(thisPtr), fn, debugBreakOnEntry); }
template<typename _TC, typename _TR, typename _T0, typename _T1, typename _T2>
bool Bind(_TC *thisPtr, const char *name, _TR (_TC::*fn)(_T0, _T1, _T2), bool debugBreakOnEntry = false) { return RawBind(name, static_cast<_TC*>(thisPtr), fn, debugBreakOnEntry); }
};
//----------------------------------------------------------------------------
#endif // __NEDNATIVEOBJECTBINDER_H__
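
the declaration-generation step itself isn't shown here; a rough sketch of what it could look like (MakeCSharpDeclaration is a made-up name, and the typeid().name() strings stored by Bind() would of course need to be mapped to actual C# type names first):

// hypothetical sketch: turn one SFunctionDefinition into a C# delegate declaration string
#include <string>

static std::string MakeCSharpDeclaration(const SFunctionDefinition &fn)
{
    std::string decl = "[UnmanagedFunctionPointer(CallingConvention.StdCall)]\n";
    decl += "public delegate ";
    decl += fn.ReturnType; // raw typeid name, needs a native -> C# type mapping in practice
    decl += " ";
    decl += fn.Name;
    decl += "(";
    for (hh_u32 i = 0; i < fn.ArgumentTypes.Count(); i++)
    {
        if (i != 0)
            decl += ", ";
        decl += fn.ArgumentTypes[i]; // same remark
        decl += " arg";
        decl += char('0' + i);
    }
    decl += ");";
    return decl;
}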
SFunctionDefinition::_BindImpl() and SFunctionDefinition::Unbind() do nothing more than call CMemberDelegatePool::CreateMemberCall() and CMemberDelegatePool::DestroyMemberCall() respectively.
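
something along these lines, assuming a single pool instance living somewhere in the editor (g_DelegatePool is a made-up name):

// hypothetical sketch of the two functions, assuming a global pool instance
static CMemberDelegatePool g_DelegatePool;

bool SFunctionDefinition::_BindImpl(const char *name, void *thisPtr, const void *codePtr, bool debugBreakOnEntry)
{
    Name = name;
    CodeEntry = g_DelegatePool.CreateMemberCall(thisPtr, codePtr, debugBreakOnEntry);
    return CodeEntry != null;
}

void SFunctionDefinition::Unbind()
{
    if (CodeEntry != null)
        g_DelegatePool.DestroyMemberCall(CodeEntry);
    Name = null;
    CodeEntry = null;
    ReturnType = null; // the destructor asserts these are all null
}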
