public class PooledByteBufAllocator extends AbstractByteBufAllocator {
    private final PoolThreadLocalCache threadCache;
    // Each thread is bound to one PoolArena
    private final PoolArena<byte[]>[] heapArenas;
    // Each thread is bound to one PoolArena
    private final PoolArena<ByteBuffer>[] directArenas;
    ...

    @Override
    protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
        PoolThreadCache cache = threadCache.get();
        PoolArena<byte[]> heapArena = cache.heapArena;
        ByteBuf buf;
        if (heapArena != null) {
            // Allocate heap memory
            buf = heapArena.allocate(cache, initialCapacity, maxCapacity);
        } else {
            buf = new UnpooledHeapByteBuf(this, initialCapacity, maxCapacity);
        }
        return toLeakAwareBuffer(buf);
    }

    @Override
    protected ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity) {
        PoolThreadCache cache = threadCache.get();
        PoolArena<ByteBuffer> directArena = cache.directArena;
        ByteBuf buf;
        if (directArena != null) {
            // Allocate direct memory
            buf = directArena.allocate(cache, initialCapacity, maxCapacity);
        } else {
            if (PlatformDependent.hasUnsafe()) {
                buf = UnsafeByteBufUtil.newUnsafeDirectByteBuf(this, initialCapacity, maxCapacity);
            } else {
                buf = new UnpooledDirectByteBuf(this, initialCapacity, maxCapacity);
            }
        }
        return toLeakAwareBuffer(buf);
    }
    ...
}

abstract class PoolArena<T> implements PoolArenaMetric {
    ...
    PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
        PooledByteBuf<T> buf = newByteBuf(maxCapacity); // Create the ByteBuf object
        allocate(cache, buf, reqCapacity); // Allocate memory for the ByteBuf object via the PoolThreadCache
        return buf;
    }

    private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
        // 1. Normalize reqCapacity to a size class
        final int normCapacity = normalizeCapacity(reqCapacity);
        if (isTinyOrSmall(normCapacity)) { // capacity < pageSize, i.e. the requested memory is less than 8K
            int tableIdx;
            PoolSubpage<T>[] table;
            boolean tiny = isTiny(normCapacity);
            if (tiny) { // < 512
                // 2. Try to allocate from the thread-local cache
                if (cache.allocateTiny(this, buf, reqCapacity, normCapacity)) {
                    // Cache hit: was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = tinyIdx(normCapacity);
                table = tinySubpagePools;
            } else {
                // 2. Try to allocate from the thread-local cache
                if (cache.allocateSmall(this, buf, reqCapacity, normCapacity)) {
                    // Cache hit: was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = smallIdx(normCapacity);
                table = smallSubpagePools;
            }
            final PoolSubpage<T> head = table[tableIdx];
            // Synchronize on the head.
            // This is needed as PoolChunk#allocateSubpage(int) and PoolChunk#free(long) may modify the doubly linked list as well.
            synchronized (head) {
                final PoolSubpage<T> s = head.next;
                if (s != head) {
                    assert s.doNotDestroy && s.elemSize == normCapacity;
                    long handle = s.allocate();
                    assert handle >= 0;
                    s.chunk.initBufWithSubpage(buf, handle, reqCapacity);
                    if (tiny) {
                        allocationsTiny.increment();
                    } else {
                        allocationsSmall.increment();
                    }
                    return;
                }
            }
            // Cache miss
            allocateNormal(buf, reqCapacity, normCapacity);
            return;
        }
        if (normCapacity <= chunkSize) { // The requested memory is larger than 8K but no larger than 16M
            // 2. Try to allocate from the thread-local cache
            if (cache.allocateNormal(this, buf, reqCapacity, normCapacity)) {
                // Cache hit: was able to allocate out of the cache so move on
                return;
            }
            // Cache miss
            allocateNormal(buf, reqCapacity, normCapacity);
        } else { // The requested memory is larger than 16M
            // Huge allocations are never served via the cache so just call allocateHuge
            allocateHuge(buf, reqCapacity);
        }
    }

    // Normalize reqCapacity to a size class
    int normalizeCapacity(int reqCapacity) {
        if (reqCapacity < 0) {
            throw new IllegalArgumentException("capacity: " + reqCapacity + " (expected: 0+)");
        }
        if (reqCapacity >= chunkSize) {
            return reqCapacity;
        }
        if (!isTiny(reqCapacity)) { // >= 512
            // Round up to the next power of two
            int normalizedCapacity = reqCapacity;
            normalizedCapacity --;
            normalizedCapacity |= normalizedCapacity >>> 1;
            normalizedCapacity |= normalizedCapacity >>> 2;
            normalizedCapacity |= normalizedCapacity >>> 4;
            normalizedCapacity |= normalizedCapacity >>> 8;
            normalizedCapacity |= normalizedCapacity >>> 16;
            normalizedCapacity ++;
            if (normalizedCapacity < 0) {
                normalizedCapacity >>>= 1;
            }
            return normalizedCapacity;
        }
        // Tiny sizes are rounded up to the next multiple of 16
        if ((reqCapacity & 15) == 0) {
            return reqCapacity;
        }
        return (reqCapacity & ~15) + 16;
    }
    ...
}

final class PoolThreadCache {
    final PoolArena<byte[]> heapArena;
    final PoolArena<ByteBuffer> directArena;

    // Hold the caches for the different size classes, which are tiny, small and normal.
    // 32 MemoryRegionCache elements, holding SubPage-level memory of 16B, 32B, 48B, ..., 480B, 496B
    private final MemoryRegionCache<byte[]>[] tinySubPageHeapCaches;
    // 4 MemoryRegionCache elements, holding SubPage-level memory of 512B, 1K, 2K, 4K
    private final MemoryRegionCache<byte[]>[] smallSubPageHeapCaches;
    // 3 MemoryRegionCache elements, holding Page-level memory of 8K, 16K, 32K
    private final MemoryRegionCache<byte[]>[] normalHeapCaches;
    private final MemoryRegionCache<ByteBuffer>[] tinySubPageDirectCaches;
    private final MemoryRegionCache<ByteBuffer>[] smallSubPageDirectCaches;
    private final MemoryRegionCache<ByteBuffer>[] normalDirectCaches;
    ...

    // Try to allocate a tiny buffer out of the cache. Returns true if successful, false otherwise.
    boolean allocateTiny(PoolArena<?> area, PooledByteBuf<?> buf, int reqCapacity, int normCapacity) {
        // First call cacheForTiny() to find the MemoryRegionCache for the requested size,
        // then call allocate() to allocate memory for the ByteBuf out of that MemoryRegionCache
        return allocate(cacheForTiny(area, normCapacity), buf, reqCapacity);
    }

    // Find the MemoryRegionCache for the requested size
    private MemoryRegionCache<?> cacheForTiny(PoolArena<?> area, int normCapacity) {
        int idx = PoolArena.tinyIdx(normCapacity);
        if (area.isDirect()) {
            return cache(tinySubPageDirectCaches, idx);
        }
        return cache(tinySubPageHeapCaches, idx);
    }

    // Return the MemoryRegionCache element at the given index of the cache array
    private static <T> MemoryRegionCache<T> cache(MemoryRegionCache<T>[] cache, int idx) {
        if (cache == null || idx > cache.length - 1) {
            return null;
        }
        return cache[idx];
    }

    // Allocate memory for the ByteBuf object out of the given MemoryRegionCache
    private boolean allocate(MemoryRegionCache<?> cache, PooledByteBuf buf, int reqCapacity) {
        if (cache == null) {
            return false;
        }
        // Call MemoryRegionCache.allocate() to allocate a region of reqCapacity bytes for buf
        boolean allocated = cache.allocate(buf, reqCapacity);
        if (++ allocations >= freeSweepAllocationThreshold) {
            allocations = 0;
            trim();
        }
        return allocated;
    }
    ...
    private abstract static class MemoryRegionCache<T> {
        private final int size;
        private final Queue<Entry<T>> queue;
        private final SizeClass sizeClass;
        private int allocations;
        ...

        // Allocate something out of the cache if possible and remove the entry from the cache.
        public final boolean allocate(PooledByteBuf<T> buf, int reqCapacity) {
            // Step 1: Poll an Entry from the queue
            Entry<T> entry = queue.poll();
            if (entry == null) {
                return false;
            }
            // Step 2: Initialize buf
            initBuf(entry.chunk, entry.handle, buf, reqCapacity);
            // Step 3: Put the polled Entry back into the object pool for reuse
            entry.recycle();
            // allocations is not thread-safe which is fine as this is only called from the same thread all time.
            ++ allocations;
            return true;
        }

        // Init the PooledByteBuf using the provided chunk and handle with the capacity restrictions.
        protected abstract void initBuf(PoolChunk<T> chunk, long handle, PooledByteBuf<T> buf, int reqCapacity);

        static final class Entry<T> {
            final Handle<Entry<?>> recyclerHandle;
            PoolChunk<T> chunk;
            long handle = -1;

            Entry(Handle<Entry<?>> recyclerHandle) {
                this.recyclerHandle = recyclerHandle;
            }

            void recycle() {
                chunk = null;
                handle = -1;
                recyclerHandle.recycle(this);
            }
        }
    }
}
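The effect of normalizeCapacity() is easiest to see with concrete numbers. The following standalone sketch is my own illustration, not Netty code; it assumes the default 16M chunkSize and reproduces the three normalization rules shown above (tiny rounds to a multiple of 16, everything else below chunkSize rounds to the next power of two, huge passes through):

public class NormalizeCapacityDemo {
    static final int CHUNK_SIZE = 16 * 1024 * 1024; // 16M, the default chunk size assumed above

    // Same rules as PoolArena.normalizeCapacity(), written compactly for illustration
    static int normalize(int reqCapacity) {
        if (reqCapacity >= CHUNK_SIZE) {
            return reqCapacity;                                 // huge: allocated as-is, never pooled
        }
        if (reqCapacity >= 512) {
            return Integer.highestOneBit(reqCapacity - 1) << 1; // next power of two
        }
        return (reqCapacity + 15) & ~15;                        // tiny: next multiple of 16
    }

    public static void main(String[] args) {
        System.out.println(normalize(20));   // 32    (tiny, multiple of 16)
        System.out.println(normalize(496));  // 496   (already aligned)
        System.out.println(normalize(513));  // 1024  (small, power of two)
        System.out.println(normalize(9000)); // 16384 (normal, power of two)
    }
}

So a request for 20B consumes a 32B tiny slot and a 9000B request consumes a whole 16K run; this internal fragmentation is the price paid for O(1) size-class lookup in the cache arrays.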
final class PoolThreadCache {
    final PoolArena<byte[]> heapArena;
    final PoolArena<ByteBuffer> directArena;

    // Hold the caches for the different size classes, which are tiny, small and normal.
    // 32 MemoryRegionCache elements, holding SubPage-level memory of 16B, 32B, 48B, ..., 480B, 496B
    private final MemoryRegionCache<byte[]>[] tinySubPageHeapCaches;
    // 4 MemoryRegionCache elements, holding SubPage-level memory of 512B, 1K, 2K, 4K
    private final MemoryRegionCache<byte[]>[] smallSubPageHeapCaches;
    // 3 MemoryRegionCache elements, holding Page-level memory of 8K, 16K, 32K
    private final MemoryRegionCache<byte[]>[] normalHeapCaches;
    private final MemoryRegionCache<ByteBuffer>[] tinySubPageDirectCaches;
    private final MemoryRegionCache<ByteBuffer>[] smallSubPageDirectCaches;
    private final MemoryRegionCache<ByteBuffer>[] normalDirectCaches;
    ...
}
abstract class PoolArena<T> implements PoolArenaMetric {
    ...
    // SubPage pools of different size classes, mirroring PoolThreadCache's tinySubPageHeapCaches
    // 32 elements: 16B, 32B, 48B, ..., 480B, 496B
    private final PoolSubpage<T>[] tinySubpagePools;
    // 4 elements: 512B, 1K, 2K, 4K
    private final PoolSubpage<T>[] smallSubpagePools;
    ...
}

final class PoolChunk<T> implements PoolChunkMetric {
    final PoolArena<T> arena;
    final T memory; // The underlying memory

    // The size of one Page, e.g. 8K
    private final int pageSize;
    // A byte array of 4096 elements recording the allocation state of contiguous memory runs
    // of different sizes; best understood as a complete binary tree
    private final byte[] memoryMap;
    // An array of 2048 elements recording which Pages of the Chunk exist as SubPages.
    // A PoolChunk is 16M and is divided into 8K Pages, so there are 16 * 1024 / 8 = 2048 Pages
    private final PoolSubpage<T>[] subpages;
    ...
}

final class PoolSubpage<T> implements PoolSubpageMetric {
    final PoolChunk<T> chunk;          // The PoolChunk this SubPage belongs to
    int elemSize;                      // The element size this SubPage is divided into
    private final long[] bitmap;       // Records the allocation state of this SubPage
    private final int memoryMapIdx;    // The index of the Page
    private final int pageSize;        // The Page size
    private final int runOffset;       // The offset of this SubPage within the chunk
    PoolSubpage<T> prev;
    PoolSubpage<T> next;
    ...
}
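To make the PoolSubpage bookkeeping concrete: an 8K page divided into elemSize slots needs only one bit of state per slot. The sketch below is my own simplified illustration, not Netty's PoolSubpage (the class name and structure are hypothetical), showing how a long[] bitmap supports allocate and free over such slots:

// A minimal sketch of bitmap-based slot tracking, as PoolSubpage does for one 8K page.
public class BitmapSubpageDemo {
    static final int PAGE_SIZE = 8192;
    final int elemSize;      // size each slot was divided into, e.g. 16B
    final int maxNumElems;   // number of slots in the page
    final long[] bitmap;     // one bit per slot: 1 = allocated

    BitmapSubpageDemo(int elemSize) {
        this.elemSize = elemSize;
        this.maxNumElems = PAGE_SIZE / elemSize;
        this.bitmap = new long[(maxNumElems + 63) >>> 6]; // 64 slots per long
    }

    // Find a free slot, mark it allocated, and return its index (-1 if full)
    int allocate() {
        for (int i = 0; i < bitmap.length; i++) {
            if (~bitmap[i] != 0) {                         // this word still has a 0 bit
                int bit = Long.numberOfTrailingZeros(~bitmap[i]);
                int idx = (i << 6) + bit;
                if (idx >= maxNumElems) return -1;
                bitmap[i] |= 1L << bit;                    // mark allocated
                return idx;                                // offset in page = idx * elemSize
            }
        }
        return -1;
    }

    void free(int idx) {
        bitmap[idx >>> 6] &= ~(1L << (idx & 63));          // clear the bit
    }

    public static void main(String[] args) {
        BitmapSubpageDemo sp = new BitmapSubpageDemo(16);  // 8192 / 16 = 512 slots
        System.out.println(sp.allocate()); // 0
        System.out.println(sp.allocate()); // 1
        sp.free(0);
        System.out.println(sp.allocate()); // 0 again: the freed slot is reused
    }
}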
final class PoolChunkList<T> implements PoolChunkListMetric {
    private PoolChunk<T> head;
    ...

    // reqCapacity is the requested memory size; normCapacity is the normalized memory size
    boolean allocate(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
        if (head == null || normCapacity > maxCapacity) {
            // Either this PoolChunkList is empty or the requested capacity is larger than the capacity
            // which can be handled by the PoolChunks that are contained in this PoolChunkList.
            return false;
        }
        // Traverse the PoolChunkList starting from its head node
        for (PoolChunk<T> cur = head;;) {
            // Try PoolChunk.allocate() on each PoolChunk in turn
            long handle = cur.allocate(normCapacity);
            if (handle < 0) {
                cur = cur.next;
                if (cur == null) {
                    return false;
                }
            } else {
                // Call PoolChunk.initBuf() to initialize the PooledByteBuf
                cur.initBuf(buf, handle, reqCapacity);
                // If this PoolChunk's usage now exceeds the maximum usage of its PoolChunkList
                if (cur.usage() >= maxUsage) {
                    // Remove the PoolChunk from its current PoolChunkList
                    remove(cur);
                    // Add the PoolChunk to the next PoolChunkList
                    nextList.add(cur);
                }
                return true;
            }
        }
    }
    ...
}
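The promotion step at the end of allocate() is driven entirely by PoolChunk.usage(). A minimal sketch of that arithmetic, assuming the default 16M chunk and a usage percentage derived from freeBytes (the demo class, names, and scenario are my own illustration, not Netty code):

public class ChunkUsageDemo {
    static final int CHUNK_SIZE = 16 * 1024 * 1024;

    // A chunk's usage percentage follows from its remaining free bytes
    static int usage(int freeBytes) {
        if (freeBytes == 0) {
            return 100;
        }
        return (int) (100 - (long) freeBytes * 100 / CHUNK_SIZE);
    }

    public static void main(String[] args) {
        int freeBytes = CHUNK_SIZE - 12 * 1024 * 1024; // 12M of the chunk has been allocated
        int u = usage(freeBytes);                      // 100 - 4M * 100 / 16M = 75
        System.out.println("usage = " + u + "%");
        int maxUsage = 75;                             // e.g. q025 covers the window [25%, 75%)
        if (u >= maxUsage) {
            System.out.println("usage crossed maxUsage: move the chunk from q025 to q050");
        }
    }
}

Each PoolChunkList thus owns a usage window, and chunks migrate forward as allocations fill them up (and, as shown later, backward as frees empty them).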
final class PoolChunk<T> implements PoolChunkMetric {
    ...
    long allocate(int normCapacity) {
        if ((normCapacity & subpageOverflowMask) != 0) { // >= pageSize
            // Page-level memory allocation
            return allocateRun(normCapacity);
        } else {
            // SubPage-level memory allocation
            return allocateSubpage(normCapacity);
        }
    }

    // Allocate a run of pages (>=1)
    // @param normCapacity normalized capacity
    // @return index in memoryMap
    private long allocateRun(int normCapacity) {
        // maxOrder = 11, pageShifts = 13
        // For example, to allocate normCapacity = 8K: d = 11 - (13 - 13) = 11
        int d = maxOrder - (log2(normCapacity) - pageShifts);
        int id = allocateNode(d);
        if (id < 0) {
            return id;
        }
        freeBytes -= runLength(id);
        return id;
    }

    // Algorithm to allocate an index in memoryMap when we query for a free node at depth d
    // @param d depth
    // @return index in memoryMap
    private int allocateNode(int d) {
        int id = 1;
        int initial = - (1 << d); // has last d bits = 0 and rest all = 1
        // Read memoryMap[id]
        byte val = value(id);
        if (val > d) { // val = unusable = 12
            return -1;
        }
        // val < d means the current node is usable
        while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0
            id <<= 1; // Descend one level: multiply id by 2 on each iteration
            val = value(id);
            if (val > d) { // The node at this id is unusable
                id ^= 1; // XOR with 1 turns the even id into id + 1, i.e. moves to the sibling node
                val = value(id);
            }
        }
        byte value = value(id);
        assert value == d && (id & initial) == 1 << d :
                String.format("val = %d, id & initial = %d, d = %d", value, id & initial, d);
        setValue(id, unusable); // mark as unusable = 12
        updateParentsAlloc(id); // Mark ancestor nodes level by level
        return id;
    }

    private byte value(int id) {
        return memoryMap[id];
    }

    // Update method used by allocate.
    // This is triggered only when a successor is allocated and all its predecessors need to update their state.
    // The minimal depth at which subtree rooted at id has some free space.
    private void updateParentsAlloc(int id) {
        while (id > 1) {
            int parentId = id >>> 1;
            byte val1 = value(id);
            byte val2 = value(id ^ 1);
            byte val = val1 < val2 ? val1 : val2;
            setValue(parentId, val);
            id = parentId;
        }
    }
    ...
}
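A scaled-down simulation helps to follow allocateNode(). The sketch below is my own illustration, not Netty code: it shrinks the tree to maxOrder = 2 (4 leaf pages, with 3 playing the role of unusable = 12) and runs the same descend / try-sibling / propagate-min logic:

// A scaled-down simulation of PoolChunk's memoryMap tree: maxOrder = 2, so 4 leaf "pages".
public class AllocateNodeDemo {
    static final int MAX_ORDER = 2;
    static final byte UNUSABLE = (byte) (MAX_ORDER + 1); // 3 plays the role of Netty's 12
    // memoryMap[id] = depth of id while free; index 0 unused, root at index 1
    static final byte[] memoryMap = {0, 0, 1, 1, 2, 2, 2, 2};

    static int allocateNode(int d) {
        int id = 1;
        int initial = -(1 << d);
        if (memoryMap[id] > d) return -1;    // not even the root has a free run this large
        while (memoryMap[id] < d || (id & initial) == 0) {
            id <<= 1;                        // descend to the left child
            if (memoryMap[id] > d) {
                id ^= 1;                     // left subtree exhausted: try the right sibling
            }
        }
        memoryMap[id] = UNUSABLE;            // mark the chosen node allocated
        for (int p = id; p > 1; p >>>= 1) {  // propagate upwards: parent = min(children)
            memoryMap[p >>> 1] = (byte) Math.min(memoryMap[p], memoryMap[p ^ 1]);
        }
        return id;
    }

    public static void main(String[] args) {
        System.out.println(allocateNode(2)); // 4: the first leaf page
        System.out.println(allocateNode(2)); // 5: its sibling
        System.out.println(allocateNode(1)); // 3: the right half, a run of 2 pages
        System.out.println(allocateNode(1)); // -1: no 2-page run left
    }
}

Note how the third call returns node 3 directly: its value is still equal to its depth, so the whole right half can be handed out as a single 2-page run without touching its children.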
abstract class PoolArena<T> implements PoolArenaMetric {
    // 32 elements: 16B, 32B, 48B, ..., 480B, 496B
    private final PoolSubpage<T>[] tinySubpagePools;
    // 4 elements: 512B, 1K, 2K, 4K
    private final PoolSubpage<T>[] smallSubpagePools;
    ...

    PooledByteBuf<T> allocate(PoolThreadCache cache, int reqCapacity, int maxCapacity) {
        PooledByteBuf<T> buf = newByteBuf(maxCapacity); // Create the ByteBuf object
        allocate(cache, buf, reqCapacity); // Allocate memory for the ByteBuf object via the PoolThreadCache
        return buf;
    }

    private void allocate(PoolThreadCache cache, PooledByteBuf<T> buf, final int reqCapacity) {
        // 1. Normalize reqCapacity to a size class
        final int normCapacity = normalizeCapacity(reqCapacity);
        if (isTinyOrSmall(normCapacity)) { // capacity < pageSize, i.e. the requested memory is less than 8K
            int tableIdx;
            PoolSubpage<T>[] table;
            boolean tiny = isTiny(normCapacity);
            if (tiny) { // capacity < 512
                if (cache.allocateTiny(this, buf, reqCapacity, normCapacity)) {
                    // was able to allocate out of the cache so move on
                    return;
                }
                // Derive tableIdx from the normalized allocation size normCapacity
                tableIdx = tinyIdx(normCapacity);
                table = tinySubpagePools;
            } else {
                if (cache.allocateSmall(this, buf, reqCapacity, normCapacity)) {
                    // was able to allocate out of the cache so move on
                    return;
                }
                // Derive tableIdx from the normalized allocation size normCapacity
                tableIdx = smallIdx(normCapacity);
                table = smallSubpagePools;
            }
            final PoolSubpage<T> head = table[tableIdx];
            // Synchronize on the head.
            // This is needed as PoolChunk#allocateSubpage(int) and PoolChunk#free(long) may modify the doubly linked list as well.
            synchronized (head) {
                final PoolSubpage<T> s = head.next;
                if (s != head) { // The PoolArena's tinySubpagePools array has a PoolSubpage that can serve this allocation
                    assert s.doNotDestroy && s.elemSize == normCapacity;
                    // Call the SubPage's allocate() method to allocate memory
                    long handle = s.allocate();
                    assert handle >= 0;
                    s.chunk.initBufWithSubpage(buf, handle, reqCapacity);
                    if (tiny) {
                        allocationsTiny.increment();
                    } else {
                        allocationsSmall.increment();
                    }
                    return;
                }
            }
            allocateNormal(buf, reqCapacity, normCapacity);
            return;
        }
        ...
    }

    static int tinyIdx(int normCapacity) {
        return normCapacity >>> 4;
    }

    static int smallIdx(int normCapacity) {
        int tableIdx = 0;
        int i = normCapacity >>> 10;
        while (i != 0) {
            i >>>= 1;
            tableIdx ++;
        }
        return tableIdx;
    }

    private synchronized void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
        // 1. Try to allocate on an existing PoolChunk
        if (q050.allocate(buf, reqCapacity, normCapacity) || q025.allocate(buf, reqCapacity, normCapacity) ||
            q000.allocate(buf, reqCapacity, normCapacity) || qInit.allocate(buf, reqCapacity, normCapacity) ||
            q075.allocate(buf, reqCapacity, normCapacity)) {
            ++allocationsNormal;
            return;
        }
        // 2. Create a new PoolChunk and allocate from it
        PoolChunk<T> c = newChunk(pageSize, maxOrder, pageShifts, chunkSize);
        // The handle points to a contiguous memory region inside the PoolChunk
        long handle = c.allocate(normCapacity);
        ++allocationsNormal;
        assert handle > 0;
        // 3. Initialize the PooledByteBuf object
        c.initBuf(buf, handle, reqCapacity);
        // 4. Add the new PoolChunk to the PoolArena's qInit PoolChunkList
        qInit.add(c);
    }
    ...
}

final class PoolChunk<T> implements PoolChunkMetric {
    ...
    long allocate(int normCapacity) {
        if ((normCapacity & subpageOverflowMask) != 0) { // normCapacity >= pageSize
            // Page-level memory allocation
            return allocateRun(normCapacity);
        } else {
            // SubPage-level memory allocation
            return allocateSubpage(normCapacity);
        }
    }
    ...
}
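The two index functions can be checked by hand. The following sketch (illustration only; the demo class is mine) reimplements tinyIdx() and smallIdx() and prints the table index for a few normalized sizes:

// Reimplementation of PoolArena.tinyIdx() / smallIdx() for illustration.
public class SubpageIdxDemo {
    static int tinyIdx(int normCapacity) {
        return normCapacity >>> 4;   // tiny sizes step by 16B, so divide by 16
    }

    static int smallIdx(int normCapacity) {
        int tableIdx = 0;
        int i = normCapacity >>> 10; // 512 -> i = 0, 1K -> i = 1, 2K -> i = 2, 4K -> i = 4
        while (i != 0) {
            i >>>= 1;
            tableIdx++;
        }
        return tableIdx;
    }

    public static void main(String[] args) {
        System.out.println(tinyIdx(16));    // 1
        System.out.println(tinyIdx(496));   // 31, the last tiny slot
        System.out.println(smallIdx(512));  // 0
        System.out.println(smallIdx(1024)); // 1
        System.out.println(smallIdx(4096)); // 3
    }
}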
(2) The flow of SubPage-level memory allocation
The main steps of PoolChunk.allocateSubpage():
1. Locate a SubPage object
2. Initialize the SubPage object
3. Call the SubPage's allocate() method to perform the allocation
final class PoolChunk<T> implements PoolChunkMetric {
    final PoolArena<T> arena;
    final T memory; // The underlying memory

    // The size of one Page, e.g. 8K
    private final int pageSize;
    // A byte array of 4096 elements recording the allocation state of contiguous memory runs
    // of different sizes; best understood as a complete binary tree
    private final byte[] memoryMap;
    private final byte[] depthMap;
    // An array of 2048 elements recording which Pages of the Chunk exist as SubPages.
    // A PoolChunk is 16M and is divided into 8K Pages, so there are 16 * 1024 / 8 = 2048 Pages
    private final PoolSubpage<T>[] subpages;
    // Used to mark memory as unusable
    private final byte unusable; // 12 by default
    ...

    // Create / initialize a new PoolSubpage of normCapacity.
    // Any PoolSubpage created / initialized here is added to the subpage pool in the PoolArena that owns this PoolChunk.
    // @param normCapacity normalized capacity
    // @return index in memoryMap
    private long allocateSubpage(int normCapacity) {
        // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
        // This is needed as we may add it back and so alter the linked-list structure.
        // 1. Locate a SubPage object
        // PoolArena.findSubpagePoolHead() divides by 16 to find e.g. the PoolSubpage head node for 16B elements
        PoolSubpage<T> head = arena.findSubpagePoolHead(normCapacity);
        synchronized (head) {
            // SubPages can only be taken from level 11 of the memoryMap, since only the nodes of that level are exactly 8K
            int d = maxOrder; // subpages are only allocated from pages, i.e. leaves
            // 1. Locate a SubPage object: allocate a node on level 11 of the balanced binary tree,
            // i.e. use allocateNode(d) to find the index of a Page within the PoolChunk
            int id = allocateNode(d);
            if (id < 0) {
                return id;
            }
            final PoolSubpage<T>[] subpages = this.subpages;
            final int pageSize = this.pageSize;
            freeBytes -= pageSize;
            // 1. Locate a SubPage object: determine which Page of the PoolChunk will exist as a SubPage
            int subpageIdx = subpageIdx(id);
            PoolSubpage<T> subpage = subpages[subpageIdx];
            // 2. Initialize the SubPage object
            if (subpage == null) {
                subpage = new PoolSubpage<T>(head, this, id, runOffset(id), pageSize, normCapacity);
                subpages[subpageIdx] = subpage;
            } else {
                subpage.init(head, normCapacity);
            }
            // 3. Call the SubPage's allocate() method to perform the allocation
            return subpage.allocate();
        }
    }
    ...
}
abstract class PoolArena<T> implements PoolArenaMetric {
    // 32 elements: 16B, 32B, 48B, ..., 480B, 496B
    private final PoolSubpage<T>[] tinySubpagePools;
    // 4 elements: 512B, 1K, 2K, 4K
    private final PoolSubpage<T>[] smallSubpagePools;
    ...

    // Find the PoolSubpage head node in the PoolArena's subpage pools for the given element size, e.g. 16B
    PoolSubpage<T> findSubpagePoolHead(int elemSize) {
        int tableIdx;
        PoolSubpage<T>[] table;
        if (isTiny(elemSize)) { // < 512
            tableIdx = elemSize >>> 4;
            table = tinySubpagePools;
        } else {
            tableIdx = 0;
            elemSize >>>= 10;
            while (elemSize != 0) {
                elemSize >>>= 1;
                tableIdx ++;
            }
            table = smallSubpagePools;
        }
        return table[tableIdx];
    }
    ...
}

final class PoolChunk<T> implements PoolChunkMetric {
    ...
    // Algorithm to allocate an index in memoryMap when we query for a free node at depth d
    // @param d depth
    // @return index in memoryMap
    private int allocateNode(int d) {
        int id = 1;
        int initial = - (1 << d); // has last d bits = 0 and rest all = 1
        // Read memoryMap[id]
        byte val = value(id);
        if (val > d) { // val = unusable = 12
            return -1;
        }
        // val < d means the current node is usable
        while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0
            id <<= 1; // Descend one level: multiply id by 2 on each iteration
            val = value(id);
            if (val > d) { // The node at this id is unusable
                id ^= 1; // XOR with 1 turns the even id into id + 1, i.e. moves to the sibling node
                val = value(id);
            }
        }
        byte value = value(id);
        assert value == d && (id & initial) == 1 << d :
                String.format("val = %d, id & initial = %d, d = %d", value, id & initial, d);
        setValue(id, unusable); // mark as unusable = 12
        updateParentsAlloc(id); // Mark ancestor nodes level by level
        return id;
    }

    private int subpageIdx(int memoryMapIdx) {
        return memoryMapIdx ^ maxSubpageAllocs; // remove highest set bit, to get offset
    }
    ...
}
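The subpageIdx() bit trick is easy to verify: leaves occupy memoryMap indexes 2048..4095, and XOR-ing with maxSubpageAllocs (2048) clears that highest set bit, leaving the page offset 0..2047. A quick check (illustration only; the demo class is mine):

// Verifying the subpageIdx() bit trick: XOR with maxSubpageAllocs (2048) removes
// the highest set bit of a leaf's memoryMap index, leaving the page offset.
public class SubpageIdxBitTrickDemo {
    static final int MAX_SUBPAGE_ALLOCS = 2048; // number of leaves = number of 8K pages

    static int subpageIdx(int memoryMapIdx) {
        return memoryMapIdx ^ MAX_SUBPAGE_ALLOCS;
    }

    public static void main(String[] args) {
        System.out.println(subpageIdx(2048)); // 0: the first leaf is page 0
        System.out.println(subpageIdx(2049)); // 1
        System.out.println(subpageIdx(4095)); // 2047: the last leaf is page 2047
    }
}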
abstract class PoolArena<T> implements PoolArenaMetric {
    ...
    void freeChunk(PoolChunk<T> chunk, long handle, SizeClass sizeClass) {
        final boolean destroyChunk;
        synchronized (this) {
            switch (sizeClass) {
            case Normal:
                ++deallocationsNormal;
                break;
            case Small:
                ++deallocationsSmall;
                break;
            case Tiny:
                ++deallocationsTiny;
                break;
            default:
                throw new Error();
            }
            // Call free() on the PoolChunk's parent, i.e. its PoolChunkList, to release the PoolChunk
            destroyChunk = !chunk.parent.free(chunk, handle);
        }
        if (destroyChunk) {
            // destroyChunk not need to be called while holding the synchronized lock.
            destroyChunk(chunk);
        }
    }
    ...
}

final class PoolChunkList<T> implements PoolChunkListMetric {
    private PoolChunk<T> head;
    private PoolChunkList<T> prevList;
    ...

    boolean free(PoolChunk<T> chunk, long handle) {
        // Mark the contiguous memory region in the PoolChunk as unused
        chunk.free(handle);
        // If the usage of the PoolChunk being freed drops below the minimum usage of this PoolChunkList
        if (chunk.usage() < minUsage) {
            // Remove the PoolChunk from this PoolChunkList
            remove(chunk);
            // Move the PoolChunk down to the previous PoolChunkList
            return move0(chunk);
        }
        return true;
    }

    private void remove(PoolChunk<T> cur) {
        if (cur == head) {
            head = cur.next;
            if (head != null) {
                head.prev = null;
            }
        } else {
            PoolChunk<T> next = cur.next;
            cur.prev.next = next;
            if (next != null) {
                next.prev = cur.prev;
            }
        }
    }

    // Moves the PoolChunk down the PoolChunkList linked-list so it will end up in the right PoolChunkList
    // that has the correct minUsage / maxUsage in respect to PoolChunk#usage().
    private boolean move0(PoolChunk<T> chunk) {
        if (prevList == null) {
            // There is no previous PoolChunkList so return false which results in having the PoolChunk destroyed and
            // all memory associated with the PoolChunk will be released.
            assert chunk.usage() == 0;
            return false;
        }
        return prevList.move(chunk);
    }
    ...
}

final class PoolChunk<T> implements PoolChunkMetric {
    final PoolArena<T> arena;
    private final PoolSubpage<T>[] subpages;
    ...

    // Free a subpage or a run of pages.
    // When a subpage is freed from PoolSubpage, it might be added back to the subpage pool of the owning PoolArena.
    // If the subpage pool in PoolArena has at least one other PoolSubpage of the given elemSize,
    // we can completely free the owning Page so it is available for subsequent allocations.
    // @param handle handle to free
    void free(long handle) {
        int memoryMapIdx = memoryMapIdx(handle);
        int bitmapIdx = bitmapIdx(handle);
        if (bitmapIdx != 0) { // free a subpage
            PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)];
            assert subpage != null && subpage.doNotDestroy;
            // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
            // This is needed as we may add it back and so alter the linked-list structure.
            PoolSubpage<T> head = arena.findSubpagePoolHead(subpage.elemSize);
            synchronized (head) {
                // 2. SubPage level: mark the slot free in the bitmap
                if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) {
                    return;
                }
            }
        }
        // 1. Page level: mark the node free in the binary tree
        freeBytes += runLength(memoryMapIdx);
        setValue(memoryMapIdx, depth(memoryMapIdx));
        updateParentsFree(memoryMapIdx);
    }
    ...
}
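updateParentsFree() is not shown in the listing above; it is the inverse of updateParentsAlloc(). The following sketch is my own simplification of that logic, reusing the 4-leaf tree shape from the earlier allocation demo: a freed node is restored to its original depth (taken from depthMap), and a parent becomes a fully free node again only when both of its children are fully free, so two freed buddies merge back into one larger run:

// A sketch of the free-side tree update, simplified from Netty's updateParentsFree().
public class UpdateParentsFreeDemo {
    // Original depth of each node (index 0 unused, root at 1); same 4-leaf tree as before
    static final byte[] depthMap  = {0, 0, 1, 1, 2, 2, 2, 2};
    // Current state: leaves 4 and 5 are allocated (value 3 = unusable), the right half is free
    static final byte[] memoryMap = {0, 1, 3, 1, 3, 3, 2, 2};

    static void free(int id) {
        memoryMap[id] = depthMap[id];            // restore the freed node to its original depth
        int logChild = depthMap[id] + 1;
        while (id > 1) {
            int parentId = id >>> 1;
            byte val1 = memoryMap[id];
            byte val2 = memoryMap[id ^ 1];
            logChild -= 1;
            if (val1 == logChild && val2 == logChild) {
                // Both children fully free: the parent becomes fully free too,
                // so the two buddies merge back into one larger run
                memoryMap[parentId] = (byte) (logChild - 1);
            } else {
                memoryMap[parentId] = val1 < val2 ? val1 : val2; // min(children)
            }
            id = parentId;
        }
    }

    public static void main(String[] args) {
        free(4);
        System.out.println(memoryMap[2]); // 2: leaf 5 is still allocated, so the left half is only partly free
        free(5);
        System.out.println(memoryMap[2]); // 1: both buddies are free again
        System.out.println(memoryMap[1]); // 0: the whole tree is free
    }
}

This merging is exactly why the Page-level free path only needs setValue(memoryMapIdx, depth(memoryMapIdx)) plus an upward sweep: the tree itself re-forms larger runs as buddies become free.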