2
3
4
5
// NewSpan fragment (per the member listing below: scl::span* NewSpan(size_t k)).
// NOTE(review): the extraction is mangled -- the leading "16"/"18" are the
// original source line numbers fused into the text; the function signature is
// not visible in this chunk.
// Public entry point: take the page-cache mutex, then delegate to the
// internal, non-locking implementation so InternalNewSpan can recurse
// (see the tail call at original line 230) without re-acquiring the lock.
16 std::unique_lock<std::mutex> lock(m_Mutex);
18 return InternalNewSpan(k);
// MapObjectToSpan fragment (per the member listing below:
// scl::span* MapObjectToSpan(void* obj) const).
// NOTE(review): mangled extraction -- leading numbers are original source
// line numbers and statements are wrapped mid-token.
24
25
// Derive the page id from the object's address by shifting out the low
// PAGE_SHIFT bits (one id per PAGE_SHIFT-sized page).
26 const size_t id = (
reinterpret_cast<size_t>(obj) >> MemoryPool::PAGE_SHIFT);
29
30
// Look up the owning span in the page-id -> span map.
// NOTE(review): no mutex is taken here, unlike the other members --
// presumably m_IdSpanMap.get is safe for concurrent readers; confirm
// against the map's implementation.
31 void* s = m_IdSpanMap.get(id);
// The map stores void*; cast back to the concrete span type.
35 return static_cast<
scl::
span*>(s);
// ReleaseSpanToPageCache fragment (per the member listing below:
// void ReleaseSpanToPageCache(scl::span* s)). Returns a span from the
// central cache to the page cache, coalescing with free neighbours.
// NOTE(review): the original numbering has gaps (e.g. lines 56-61, 64-79,
// 81-84, 100-110 are missing from this view) -- the null checks, loop
// headers, SystemFree/return statements of those ranges are not visible.
45 std::unique_lock<std::mutex> lock(m_Mutex);
48
49
// Oversize spans (more pages than the largest free-list bucket) go
// straight back to the OS instead of being cached.
50 if (s->m_NPages > MemoryPool::PAGE_NUM - 1)
// Reconstruct the span's base address from its first page id.
52 void* ptr =
reinterpret_cast<
void*>(s->m_PageId << MemoryPool::PAGE_SHIFT);
// Drop the map entry for the first page id.
// NOTE(review): only m_PageId is cleared here -- presumably the other
// page ids of an oversize span were never mapped (cf. original line 147,
// which maps only the first id for the oversize path); confirm.
55 m_IdSpanMap.set(s->m_PageId,
nullptr);
// (Missing lines 56-61: presumably SystemFree(ptr), span deletion and
// an early return -- not visible in this chunk.)
62
63
// Coalesce leftwards: the page immediately before this span.
66 const size_t leftId = s->m_PageId - 1;
68 scl::
span* leftSpan =
static_cast<scl::span*>(m_IdSpanMap.get(leftId));
// Stop merging if the combined span would exceed the largest bucket.
// (Missing lines 81-84: presumably the break/guard body and the null /
// in-use checks of an enclosing while loop at 64-79.)
80 if (leftSpan->m_NPages + s->m_NPages > MemoryPool::PAGE_NUM - 1)
// Absorb the left neighbour: grow this span backwards...
85 s->m_PageId = leftSpan->m_PageId;
86 s->m_NPages += leftSpan->m_NPages;
// ...then unlink and recycle the absorbed span object.
88 m_SpanLists[leftSpan->m_NPages].Erase(leftSpan);
89 m_SpanPool.Delete(leftSpan);
93
94
// Coalesce rightwards: the page immediately after this span's last page.
97 const size_t rightId = s->m_PageId + s->m_NPages;
99 scl::
span* rightSpan =
static_cast<scl::span*>(m_IdSpanMap.get(rightId));
// Same bucket-size guard as the left merge (missing surrounding loop
// at original lines 100-110).
111 if (rightSpan->m_NPages + s->m_NPages > MemoryPool::PAGE_NUM - 1)
// Absorb the right neighbour (page id stays the same, only grows).
116 s->m_NPages += rightSpan->m_NPages;
118 m_SpanLists[rightSpan->m_NPages].Erase(rightSpan);
119 m_SpanPool.Delete(rightSpan);
123
124
// Park the (possibly merged) span on the free list for its size...
125 m_SpanLists[s->m_NPages].PushFront(s);
// ...and map only its first and last page ids, which is all that future
// neighbour-coalescing lookups need.
128 m_IdSpanMap.set(s->m_PageId, s);
129 m_IdSpanMap.set(s->m_PageId + s->m_NPages - 1, s);
// InternalNewSpan fragment (per the member listing below:
// scl::span* InternalNewSpan(size_t k)) -- fetch a k-page span; internal
// call, runs with m_Mutex already held by NewSpan.
// NOTE(review): the original numbering has gaps (returns / branch closers
// such as lines 148-152, 166-171, 206-212 are missing from this view).
137
138
// Path 1: request larger than the biggest free-list bucket -- allocate
// directly from the OS and map only the first page id.
139 if (k > MemoryPool::PAGE_NUM - 1)
141 void* ptr = SystemAlloc(k);
142 scl::
span* s = m_SpanPool.New();
143 s->m_PageId =
reinterpret_cast<size_t>(ptr) >> MemoryPool::PAGE_SHIFT;
145 s->m_BlockSize = k * (1 << MemoryPool::PAGE_SHIFT);
// Oversize spans map only the first id (missing lines presumably set
// m_NPages and return s -- not visible here).
147 m_IdSpanMap.set(s->m_PageId, s);
153
154
// Path 2: exact-fit bucket has a free span -- take it.
155 if (!m_SpanLists[k].Empty())
158
159
160 scl::
span* s = m_SpanLists[k].PopFront();
161 s->m_BlockSize = k * (1 << MemoryPool::PAGE_SHIFT);
// Map every page of an in-use span so MapObjectToSpan can resolve any
// interior address (missing lines presumably return s afterwards).
163 for (size_t i = 0; i < s->m_NPages; ++i)
165 m_IdSpanMap.set(s->m_PageId + i, s);
172
173
// Path 3: split the first larger free span found into a k-page span
// (returned) and a remainder (re-shelved).
174 for (size_t i = k + 1; i < MemoryPool::PAGE_NUM; ++i)
176 if (!m_SpanLists[i].Empty())
179
180
181 scl::span* nSpan = m_SpanLists[i].PopFront();
184
185
// Carve the leading k pages off nSpan into a fresh kSpan.
// (Missing line 188 presumably sets kSpan->m_NPages = k.)
186 scl::span* kSpan = m_SpanPool.New();
187 kSpan->m_PageId = nSpan->m_PageId;
189 kSpan->m_BlockSize = (1 << MemoryPool::PAGE_SHIFT) * k;
// Shrink the remainder in place: advance its first page past the cut.
191 nSpan->m_PageId += k;
192 nSpan->m_NPages -= k;
193 nSpan->m_BlockSize = (1 << MemoryPool::PAGE_SHIFT) * nSpan->m_NPages;
196
197
// Re-shelve the remainder on the list for its new size...
198 m_SpanLists[nSpan->m_NPages].PushFront(nSpan);
// ...mapping only its first and last page ids (enough for coalescing),
// mirroring ReleaseSpanToPageCache.
200 m_IdSpanMap.set(nSpan->m_PageId, nSpan);
201 m_IdSpanMap.set(nSpan->m_PageId + nSpan->m_NPages - 1, nSpan);
// Map every page of the in-use kSpan for MapObjectToSpan lookups
// (missing lines 206-212 presumably return kSpan).
203 for (size_t j = 0; j < kSpan->m_NPages; ++j)
205 m_IdSpanMap.set(kSpan->m_PageId + j, kSpan);
213
214
// Path 4: nothing cached at all -- refill from the OS with the largest
// bucketable span, shelve it, then retry; the recursion is guaranteed to
// hit Path 2 or 3 on the second pass.
215 void* ptr = SystemAlloc(MemoryPool::PAGE_NUM - 1);
218
219
220 scl::
span* bigSpan = m_SpanPool.New();
221 bigSpan->m_PageId =
reinterpret_cast<size_t>(ptr) >> MemoryPool::PAGE_SHIFT;
222 bigSpan->m_NPages = MemoryPool::PAGE_NUM - 1;
223 bigSpan->m_BlockSize = (1 << MemoryPool::PAGE_SHIFT) * bigSpan->m_NPages;
226
227
228 m_SpanLists[MemoryPool::PAGE_NUM - 1].PushFront(bigSpan);
// Safe to recurse without locking: the caller (NewSpan) holds m_Mutex.
230 return InternalNewSpan(k);
static PageCache m_PageCache
The single shared PageCache instance (singleton).
scl::span * MapObjectToSpan(void *obj) const
Find span by memory pointer.
scl::span * InternalNewSpan(size_t k)
Fetch a span of k pages (internal call; assumes the lock is already held).
void ReleaseSpanToPageCache(scl::span *s)
Release a span from the central cache (cc) back to the page cache (pc).
scl::span * NewSpan(size_t k)
Fetch a span of k pages.
Page memory cache. Third level of memory allocator.
bool m_IsUse
True if in use.
Used to manage multi-page memory.
static void SystemFree(void *ptr)
Free memory to system.