In https://blog.csdn.net/SweeNeil/article/details/83744843 I gave a rough walkthrough of the overall dlopen flow, but _dl_map_object_from_fd() and the link_map structure were not analyzed there; this post covers those two parts.
Since _dl_map_object_from_fd() is fairly long, the full function is placed at the end as an appendix, and the sections below walk through it piece by piece.
1. Walking through _dl_map_object_from_fd
At the top of _dl_map_object_from_fd, a link_map pointer and several ELF-related variables are declared:
struct link_map *l = NULL;
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
Ehdr above corresponds to the ELF file header and Phdr to an entry of the ELF program header table; their definitions are:
typedef struct
{
  unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
  Elf64_Half e_type;                /* Object file type */
  Elf64_Half e_machine;             /* Architecture */
  Elf64_Word e_version;             /* Object file version */
  Elf64_Addr e_entry;               /* Entry point virtual address */
  Elf64_Off e_phoff;                /* Program header table file offset */
  Elf64_Off e_shoff;                /* Section header table file offset */
  Elf64_Word e_flags;               /* Processor-specific flags */
  Elf64_Half e_ehsize;              /* ELF header size in bytes */
  Elf64_Half e_phentsize;           /* Program header table entry size */
  Elf64_Half e_phnum;               /* Program header table entry count */
  Elf64_Half e_shentsize;           /* Section header table entry size */
  Elf64_Half e_shnum;               /* Section header table entry count */
  Elf64_Half e_shstrndx;            /* Section header string table index */
} Elf64_Ehdr;
typedef struct
{
  Elf64_Word p_type;    /* Segment type */
  Elf64_Word p_flags;   /* Segment flags */
  Elf64_Off p_offset;   /* Segment file offset */
  Elf64_Addr p_vaddr;   /* Segment virtual address */
  Elf64_Addr p_paddr;   /* Segment physical address */
  Elf64_Xword p_filesz; /* Segment size in file */
  Elf64_Xword p_memsz;  /* Segment size in memory */
  Elf64_Xword p_align;  /* Segment alignment */
} Elf64_Phdr;
(The ELF format itself is covered in plenty of other blog posts, so it is not introduced further here.)
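To make the two structures concrete, here is a minimal, self-contained sketch (not glibc code) that reads the ELF header of a shared object with ordinary libc calls and prints the fields the loader uses below; the path "libfoo.so" is only a placeholder.
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main (void)
{
  Elf64_Ehdr ehdr;
  int fd = open ("libfoo.so", O_RDONLY);            /* placeholder path */
  if (fd < 0 || read (fd, &ehdr, sizeof ehdr) != sizeof ehdr)
    return 1;
  if (memcmp (ehdr.e_ident, ELFMAG, SELFMAG) != 0)  /* check the "\177ELF" magic */
    return 1;
  printf ("e_type = %u (2 = ET_EXEC, 3 = ET_DYN)\n", ehdr.e_type);
  printf ("program header table: offset %lu, %u entries of %u bytes each\n",
          (unsigned long) ehdr.e_phoff, ehdr.e_phnum, ehdr.e_phentsize);
  close (fd);
  return 0;
}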
Next, the following code checks whether a link_map already exists that describes this .so file:
/* Look again to see if the real name matched another already loaded.*/
for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it.*/
__close (fd);
/* If the name is not in the list of names for this object add
it.*/
free (realname);
add_name_to_object (l, name);
return l;
}
If a match is found, the file descriptor is closed and that link_map is returned.
Note the if statement inside the loop:
if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
Here st_ino is the inode number of the underlying file and st_dev is the ID of the device it lives on; comparing both identifies the file at the filesystem level, regardless of which path name was used. If they match an existing, non-removed entry, a link_map for this object has already been built and can simply be returned.
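As a quick illustration (a sketch, not glibc code): two different path names that resolve to the same inode compare equal on (st_dev, st_ino), which is exactly the test above. The paths used here are hypothetical.
#include <stdio.h>
#include <sys/stat.h>

int main (void)
{
  struct stat a, b;
  /* Hypothetical paths; the second could be a symlink to the first. */
  if (stat ("/usr/lib/libfoo.so.1", &a) == 0
      && stat ("/usr/lib/libfoo.so.1.2.3", &b) == 0
      && a.st_dev == b.st_dev && a.st_ino == b.st_ino)
    puts ("same underlying file -> the loader would reuse the existing link_map");
  return 0;
}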
Continuing on, the Ehdr declared at the top is now put to use: header is pointed at fbp->buf, which, as the comment says, already holds the ELF header read in open_verify.
/* This is the ELF header.We read it in `open_verify'.*/
header = (void *) fbp->buf;
Moving on:
/* Extract the remaining details we need from the ELF header
and then read in the program header table.*/
// Virtual address of the program entry point; may be 0 if the object has no entry point
l->l_entry = header->e_entry;
// Object file type (2 = ET_EXEC executable, 3 = ET_DYN shared object)
type = header->e_type;
// Number of program header table entries
l->l_phnum = header->e_phnum;
// maplength = number of program header entries * size of one program header
maplength = header->e_phnum * sizeof (ElfW(Phdr));
// If the program header table (e_phoff + maplength) already lies within the fbp->len bytes read so far, nothing more needs to be read
if (header->e_phoff + maplength <= (size_t) fbp->len)
phdr = (void *) (fbp->buf + header->e_phoff);
// Otherwise it has to be read from the file
else
{
phdr = alloca (maplength);
__lseek (fd, header->e_phoff, SEEK_SET);
if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
{
errstring = N_("cannot read file data");
goto call_lose_errno;
}
}
This fills in several link_map fields and sets up maplength and phdr, in preparation for mapping the ELF segments. Continuing, the following structure is defined; as the comment says, it collects the load commands.
/* Scan the program header table, collecting its load commands.*/
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
Next the program header table phdr is traversed, and each entry is handled according to its p_type:
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later.*/
c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
/* Determine whether there is a gap between the last segment
and this one.*/
if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
has_holes = true;
/* Optimize a common case.*/
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
c->prot = (PF_TO_PROT
>> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
#endif
break;
case PT_TLS:
if (ph->p_memsz == 0)
/* Nothing to do for an empty segment.*/
break;
l->l_tls_blocksize = ph->p_memsz;
l->l_tls_align = ph->p_align;
if (ph->p_align == 0)
l->l_tls_firstbyte_offset = 0;
else
l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
l->l_tls_initimage_size = ph->p_filesz;
/* Since we don't know the load address yet only store the
offset.We will adjust it later.*/
l->l_tls_initimage = (void *) ph->p_vaddr;
/* If not loading the initial set of shared libraries,
check whether we should permit loading a TLS segment.*/
if (__builtin_expect (l->l_type == lt_library, 1)
/* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
not set up TLS data structures, so don't use them now.*/
|| __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
{
/* Assign the next available module ID.*/
l->l_tls_modid = _dl_next_tls_modid ();
break;
}
errval = 0;
errstring = N_("cannot handle TLS data");
goto call_lose;
break;
case PT_GNU_STACK:
stack_flags = ph->p_flags;
break;
case PT_GNU_RELRO:
l->l_relro_addr = ph->p_vaddr;
l->l_relro_size = ph->p_memsz;
break;
}
Per the ELF specification, each program header type serves a different purpose, so each case takes its own handling strategy.
In the PT_LOAD case, an entry describing every loadable segment is built up in the loadcmds array (page-aligned start/end, file offset and protection flags). Continuing on:
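The same information can be inspected from ordinary user code. The sketch below (assuming a glibc system; it is not loader code) walks the program headers of every object already loaded in the current process with dl_iterate_phdr() and redoes the page rounding that fills mapstart/mapend in a loadcmd:
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>
#include <unistd.h>

static int show_loads (struct dl_phdr_info *info, size_t size, void *data)
{
  ElfW(Addr) ps = (ElfW(Addr)) sysconf (_SC_PAGESIZE);
  for (int i = 0; i < info->dlpi_phnum; ++i)
    {
      const ElfW(Phdr) *ph = &info->dlpi_phdr[i];
      if (ph->p_type != PT_LOAD)
        continue;
      /* Same rounding as c->mapstart / c->mapend above. */
      ElfW(Addr) mapstart = ph->p_vaddr & ~(ps - 1);
      ElfW(Addr) mapend = (ph->p_vaddr + ph->p_filesz + ps - 1) & ~(ps - 1);
      printf ("%s: PT_LOAD p_vaddr=%#lx -> mapstart=%#lx mapend=%#lx\n",
              info->dlpi_name, (unsigned long) ph->p_vaddr,
              (unsigned long) mapstart, (unsigned long) mapend);
    }
  return 0;                       /* 0 means keep iterating */
}

int main (void)
{
  dl_iterate_phdr (show_loads, NULL);
  return 0;
}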
/* Now process the load commands and map segments into memory.*/
c = loadcmds;
/* Length of the sections to be loaded.*/
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
After these two assignments, the actual mapping begins:
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
c->mapstart & GLRO(dl_use_load_bias))
- MAP_BASE_ADDR (l));
/* Remember which part of the address space this object uses.*/
l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
c->prot,
MAP_COPY|MAP_FILE,
fd, c->mapoff);
l->l_map_end = l->l_map_start + maplength;
l->l_addr = l->l_map_start - c->mapstart;
This maps the whole extent of the object in one mmap call and fills in part of the link_map: l_map_start/l_map_end record the mapping, and l_addr becomes the load bias, i.e. the difference between where the object actually landed in memory and the addresses recorded in the file.
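A small sketch of that load-bias arithmetic with made-up numbers (none of the addresses are real, only the relationship matters):
#include <stdio.h>
#include <stdint.h>

int main (void)
{
  uint64_t mapstart = 0x0;                  /* c->mapstart: page-aligned p_vaddr of the first PT_LOAD */
  uint64_t l_map_start = 0x7f12a4c00000;    /* where mmap happened to place the object (made up) */
  uint64_t l_addr = l_map_start - mapstart; /* the load bias */
  uint64_t p_vaddr = 0x201d80;              /* some address recorded in the file (made up) */
  printf ("runtime address = %#llx\n", (unsigned long long) (p_vaddr + l_addr));
  return 0;
}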
if (has_holes)
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated.Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping.*/
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE);
The code above revokes all access (PROT_NONE) to the excess, higher-address portion, i.e. the holes between segments that will not be remapped later. Continuing:
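The effect of that __mprotect can be reproduced in a standalone sketch (not loader code): reserve one large region, then take away all access from the pages in the middle, just as the loader does for the hole between two PT_LOAD segments.
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main (void)
{
  long ps = sysconf (_SC_PAGESIZE);
  /* Stand-in for the "map the whole extent in one go" mmap above. */
  char *base = mmap (NULL, 4 * ps, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return 1;
  /* Revoke access to the two middle pages, i.e. the "hole". */
  if (mprotect (base + ps, 2 * ps, PROT_NONE) != 0)
    return 1;
  printf ("pages [%p, %p) are now inaccessible\n",
          (void *) (base + ps), (void *) (base + 3 * ps));
  return 0;
}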
/* Remember which part of the address space this object uses.*/
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
l->l_contiguous = !has_holes;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart
/* Map the segment contents from the file.*/
&& (__mmap ((void *) (l->l_addr + c->mapstart),
c->mapend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
== MAP_FAILED))
goto map_error;
postmap:
if (c->prot & PROT_EXEC)
l->l_text_end = l->l_addr + c->mapend;
if (l->l_phdr == 0
&& (ElfW(Off)) c->mapoff <= header->e_phoff
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment.*/
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file.*/
ElfW(Addr) zero, zeroend, zeropage;
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it.*/
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment.*/
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
{
/* Dag nab it.*/
if (__mprotect ((caddr_t) (zero
& ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
{
errstring = N_("cannot change memory protections");
goto call_lose_errno;
}
}
memset ((void *) zero, '\0', zeropage - zero);
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD.*/
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
-1, 0);
if (__builtin_expect (mapat == MAP_FAILED, 0))
{
errstring = N_("cannot map zero-fill pages");
goto call_lose_errno;
}
}
}
++c;
}
The loop above maps each segment with the protection derived earlier from its PT_LOAD program header. The zeroend > zeropage case is special: those pages are mapped as anonymous, private memory belonging to the process, and this is where the uninitialized data (BSS) ends up. Here zero is the end of the data copied from the file, zeropage is that address rounded up to the next page boundary, and zeroend is the end of the segment in memory (p_vaddr + p_memsz plus the load bias). The tail of the last file-backed page is memset to zero (temporarily made writable if necessary), and everything from zeropage up to zeroend is backed by fresh anonymous pages, so all of the uninitialized data reads as zero.
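The zero/zeropage/zeroend arithmetic is easy to see with made-up numbers (a sketch, not glibc code); here p_memsz is larger than p_filesz, so part of the BSS lands in the tail of the last file-backed page and the rest needs anonymous pages:
#include <stdio.h>
#include <stdint.h>

int main (void)
{
  uint64_t pagesize = 0x1000;
  uint64_t l_addr   = 0x7f12a4c00000;      /* load bias (made up) */
  uint64_t dataend  = 0x205000 + 0x3c8;    /* p_vaddr + p_filesz (made up) */
  uint64_t allocend = 0x205000 + 0x2190;   /* p_vaddr + p_memsz  (made up) */

  uint64_t zero     = l_addr + dataend;    /* end of data copied from the file */
  uint64_t zeroend  = l_addr + allocend;   /* end of the segment in memory */
  uint64_t zeropage = (zero + pagesize - 1) & ~(pagesize - 1);

  if (zeroend < zeropage)                  /* all of the BSS fits in the last file-backed page */
    zeropage = zeroend;
  printf ("memset to zero:     [%#llx, %#llx)\n",
          (unsigned long long) zero, (unsigned long long) zeropage);
  if (zeroend > zeropage)
    printf ("anonymous zero map: [%#llx, %#llx)\n",
            (unsigned long long) zeropage, (unsigned long long) zeroend);
  return 0;
}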
if (l->l_ld == 0)
{
if (__builtin_expect (type == ET_DYN, 0))
{
errstring = N_("object file has no dynamic section");
goto call_lose;
}
}
else
l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
Continuing on, the function then calls
elf_get_dynamic_info (l, NULL);
The elf_get_dynamic_info call is one of the most important steps of the whole loading process: almost everything the dynamic linker does afterwards works through the l_info array that is filled in here. Stepping into elf_get_dynamic_info:
inline void __attribute__ ((unused, always_inline))
elf_get_dynamic_info (struct link_map *l, ElfW(Dyn) *temp)
{
ElfW(Dyn) *dyn = l->l_ld;
ElfW(Dyn) **info;
#if __ELF_NATIVE_CLASS == 32
typedef Elf32_Word d_tag_utype;
#elif __ELF_NATIVE_CLASS == 64
typedef Elf64_Xword d_tag_utype;
#endif
#ifndef RTLD_BOOTSTRAP
if (dyn == NULL)
return;
#endif
info = l->l_info;
while (dyn->d_tag != DT_NULL)
{
if ((d_tag_utype) dyn->d_tag < DT_NUM)
info[dyn->d_tag] = dyn;
else if (dyn->d_tag >= DT_LOPROC &&
dyn->d_tag < DT_LOPROC + DT_THISPROCNUM)
info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
else if ((d_tag_utype) DT_VERSIONTAGIDX (dyn->d_tag) < DT_VERSIONTAGNUM)
info[VERSYMIDX (dyn->d_tag)] = dyn;
else if ((d_tag_utype) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM] = dyn;
else if ((d_tag_utype) DT_VALTAGIDX (dyn->d_tag) < DT_VALNUM)
info[DT_VALTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM] = dyn;
else if ((d_tag_utype) DT_ADDRTAGIDX (dyn->d_tag) < DT_ADDRNUM)
info[DT_ADDRTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM] = dyn;
++dyn;
}
#define DL_RO_DYN_TEMP_CNT 8
#ifndef DL_RO_DYN_SECTION
/* Don't adjust .dynamic unnecessarily.*/
if (l->l_addr != 0)
{
ElfW(Addr) l_addr = l->l_addr;
int cnt = 0;
# define ADJUST_DYN_INFO(tag) \
  do \
    if (info[tag] != NULL) \
      { \
        if (temp) \
          { \
            temp[cnt].d_tag = info[tag]->d_tag; \
            temp[cnt].d_un.d_ptr = info[tag]->d_un.d_ptr + l_addr; \
            info[tag] = temp + cnt++; \
          } \
        else \
          info[tag]->d_un.d_ptr += l_addr; \
      } \
  while (0)

ADJUST_DYN_INFO (DT_HASH);
ADJUST_DYN_INFO (DT_PLTGOT);
ADJUST_DYN_INFO (DT_STRTAB);
ADJUST_DYN_INFO (DT_SYMTAB);
# if ! ELF_MACHINE_NO_RELA
ADJUST_DYN_INFO (DT_RELA);
# endif
# if ! ELF_MACHINE_NO_REL
ADJUST_DYN_INFO (DT_REL);
# endif
ADJUST_DYN_INFO (DT_JMPREL);
ADJUST_DYN_INFO (VERSYMIDX (DT_VERSYM));
ADJUST_DYN_INFO (DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM);
# undef ADJUST_DYN_INFO
assert (cnt <= DL_RO_DYN_TEMP_CNT);
}
#endif
if (info[DT_PLTREL] != NULL)
{
#if ELF_MACHINE_NO_RELA
assert (info[DT_PLTREL]->d_un.d_val == DT_REL);
#elif ELF_MACHINE_NO_REL
assert (info[DT_PLTREL]->d_un.d_val == DT_RELA);
#else
assert (info[DT_PLTREL]->d_un.d_val == DT_REL
|| info[DT_PLTREL]->d_un.d_val == DT_RELA);
#endif
}
#if ! ELF_MACHINE_NO_RELA
if (info[DT_RELA] != NULL)
assert (info[DT_RELAENT]->d_un.d_val == sizeof (ElfW(Rela)));
# endif
# if ! ELF_MACHINE_NO_REL
if (info[DT_REL] != NULL)
assert (info[DT_RELENT]->d_un.d_val == sizeof (ElfW(Rel)));
#endif
#ifdef RTLD_BOOTSTRAP
/* Only the bind now flags are allowed.*/
assert (info[VERSYMIDX (DT_FLAGS_1)] == NULL
|| (info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val & ~DF_1_NOW) == 0);
assert (info[DT_FLAGS] == NULL
|| (info[DT_FLAGS]->d_un.d_val & ~DF_BIND_NOW) == 0);
/* Flags must not be set for ld.so.*/
assert (info[DT_RUNPATH] == NULL);
assert (info[DT_RPATH] == NULL);
#else
if (info[DT_FLAGS] != NULL)
{
/* Flags are used.Translate to the old form where available.
Since these l_info entries are only tested for NULL pointers it
is ok if they point to the DT_FLAGS entry.*/
l->l_flags = info[DT_FLAGS]->d_un.d_val;
if (l->l_flags & DF_SYMBOLIC)
info[DT_SYMBOLIC] = info[DT_FLAGS];
if (l->l_flags & DF_TEXTREL)
info[DT_TEXTREL] = info[DT_FLAGS];
if (l->l_flags & DF_BIND_NOW)
info[DT_BIND_NOW] = info[DT_FLAGS];
}
if (info[VERSYMIDX (DT_FLAGS_1)] != NULL)
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NOW)
info[DT_BIND_NOW] = info[VERSYMIDX (DT_FLAGS_1)];
}
if (info[DT_RUNPATH] != NULL)
/* If both RUNPATH and RPATH are given, the latter is ignored.*/
info[DT_RPATH] = NULL;
#endif
}
The unused in the __attribute__ list keeps the compiler from warning under -Wall if the function ends up never being referenced, and always_inline is self-explanatory: it forces the function to be inlined.
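For illustration only, a tiny function marked the same way (the helper name is made up):
#include <stddef.h>

/* 'unused' silences the -Wall warning if nothing ends up calling this,
   'always_inline' forces inlining even without optimization. */
static inline size_t __attribute__ ((unused, always_inline))
page_align_down (size_t addr, size_t pagesize)
{
  return addr & ~(pagesize - 1);   /* the same rounding the loader uses */
}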
Inside elf_get_dynamic_info the l_info array is filled in:
info = l->l_info;
while (dyn->d_tag != DT_NULL)
{
if ((d_tag_utype) dyn->d_tag < DT_NUM)
info[dyn->d_tag] = dyn;
else if (dyn->d_tag >= DT_LOPROC &&
dyn->d_tag < DT_LOPROC + DT_THISPROCNUM)
info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
else if ((d_tag_utype) DT_VERSIONTAGIDX (dyn->d_tag) < DT_VERSIONTAGNUM)
info[VERSYMIDX (dyn->d_tag)] = dyn;
else if ((d_tag_utype) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM] = dyn;
else if ((d_tag_utype) DT_VALTAGIDX (dyn->d_tag) < DT_VALNUM)
info[DT_VALTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM] = dyn;
else if ((d_tag_utype) DT_ADDRTAGIDX (dyn->d_tag) < DT_ADDRNUM)
info[DT_ADDRTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM] = dyn;
++dyn;
}
This pays off heavily later, because these dynamic entries are how things such as symbol names and relocation information are located. The elegant part is that the array index is derived directly from d_tag, which keeps the code very compact.
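The same d_tag-as-index trick can be tried from ordinary code. The sketch below is only an illustration under the assumption of a Linux/glibc system: it obtains a link_map with dlinfo(RTLD_DI_LINKMAP), walks the public l_ld (.dynamic) array, and indexes the small processor-independent tags straight into an array, the way elf_get_dynamic_info fills l_info. Because the loader has already run the ADJUST_DYN_INFO step shown above, DT_STRTAB already holds an absolute runtime address here. (Older glibc needs -ldl when linking; "libm.so.6" is just a convenient library to open.)
#define _GNU_SOURCE
#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <stdio.h>

int main (void)
{
  void *handle = dlopen ("libm.so.6", RTLD_NOW);
  struct link_map *l = NULL;
  if (handle == NULL || dlinfo (handle, RTLD_DI_LINKMAP, &l) != 0)
    return 1;

  /* Let d_tag itself be the array index for tags below DT_NUM,
     just like info[dyn->d_tag] = dyn above. */
  ElfW(Dyn) *info[DT_NUM] = { 0 };
  for (ElfW(Dyn) *dyn = l->l_ld; dyn->d_tag != DT_NULL; ++dyn)
    if (dyn->d_tag >= 0 && dyn->d_tag < DT_NUM)
      info[dyn->d_tag] = dyn;

  if (info[DT_STRTAB] != NULL && info[DT_SONAME] != NULL)
    printf ("SONAME: %s\n",
            (const char *) info[DT_STRTAB]->d_un.d_ptr
            + info[DT_SONAME]->d_un.d_val);
  dlclose (handle);
  return 0;
}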
That completes the walkthrough of _dl_map_object_from_fd. Quite a few questions remain, which I hope to answer as I keep studying; feel free to raise yours in the comments.
2. The link_map structure
link_map is the structure that describes a loaded shared object. The l_next and l_prev members form the chain of all shared objects loaded at startup. These data structures live in the space used by the run-time dynamic linker; modifying them can have disastrous consequences, and the structure may change in the future if necessary. User-level programs must never define objects of this type themselves.
The verbatim definition of link_map is reproduced in the appendix; below is the structure again with a field-by-field explanation:
struct link_map
{
  /* These first few members are part of the protocol with the debugger.
     This is the same format used in SVR4. */
  ElfW(Addr) l_addr;                /* Base address the shared object is loaded at */
  char *l_name;                     /* Absolute file name the object was found in */
  ElfW(Dyn) *l_ld;                  /* Dynamic section of the shared object */
  struct link_map *l_next, *l_prev; /* Chain of loaded objects */

  /* All following members are internal to the dynamic linker.
     They may change without notice. */

  /* This is an element which is only ever different from a pointer to
     the very same copy of this type for ld.so when it is used in more
     than one namespace. */
  struct link_map *l_real;
  /* Number of the namespace this link map belongs to. */
  Lmid_t l_ns;
  struct libname_list *l_libname;
  /* Indexed pointers into the dynamic section.
     [0, DT_NUM) are indexed by the processor-independent tags,
     [DT_NUM, DT_NUM+DT_THISPROCNUM) by the tag minus DT_LOPROC,
     [DT_NUM+DT_THISPROCNUM, DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM) by
     DT_VERSIONTAGIDX(tagvalue),
     [DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM,
      DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM) by
     DT_EXTRATAGIDX(tagvalue),
     [DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM,
      DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM) by
     DT_VALTAGIDX(tagvalue), and
     [DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM,
      DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM+DT_ADDRNUM)
     by DT_ADDRTAGIDX(tagvalue). */
  ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
                    + DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
  const ElfW(Phdr) *l_phdr;         /* Pointer to the program header table in memory */
  ElfW(Addr) l_entry;               /* The real entry point address */
  ElfW(Half) l_phnum;               /* Number of program header entries */
  ElfW(Half) l_ldnum;               /* Number of dynamic section entries */
  /* Array of DT_NEEDED dependencies and their dependencies, in
     dependency order for symbol lookup (with and without duplicates).
     There is no entry before the dependencies have been loaded. */
  struct r_scope_elem l_searchlist;
  /* We need a special searchlist to process objects marked with DT_SYMBOLIC. */
  struct r_scope_elem l_symbolic_searchlist;
  /* Dependent object that first caused this object to be loaded. */
  struct link_map *l_loader;
  /* Array with version names. */
  struct r_found_version *l_versions;
  unsigned int l_nversions;
  /* Symbol hash table. */
  Elf_Symndx l_nbuckets;
  Elf32_Word l_gnu_bitmask_idxbits;
  Elf32_Word l_gnu_shift;
  const ElfW(Addr) *l_gnu_bitmask;
  union
  {
    const Elf32_Word *l_gnu_buckets;
    const Elf_Symndx *l_chain;
  };
  union
  {
    const Elf32_Word *l_gnu_chain_zero;
    const Elf_Symndx *l_buckets;
  };
  unsigned int l_direct_opencount;  /* Reference count for dlopen/dlclose */
  enum                              /* Where this object came from */
  {
    lt_executable,                  /* The main executable program */
    lt_library,                     /* Library needed by the main executable */
    lt_loaded                       /* Extra shared object loaded at run time */
  } l_type:2;
  unsigned int l_relocated:1;       /* Nonzero once relocation is done */
  unsigned int l_init_called:1;     /* Nonzero if the DT_INIT function has been called */
  unsigned int l_global:1;          /* Nonzero if the object is in _dl_global_scope */
  unsigned int l_reserved:2;        /* Reserved for internal use */
  unsigned int l_phdr_allocated:1;  /* Nonzero if the data structure pointed to by l_phdr is allocated */
  unsigned int l_soname_added:1;    /* Nonzero if the SONAME is for sure in the l_libname list */
  unsigned int l_faked:1;           /* Nonzero if this is a faked descriptor without an associated file */
  unsigned int l_need_tls_init:1;   /* Nonzero if GL(dl_init_static_tls) should be called on this link map when relocation finishes */
  unsigned int l_auditing:1;        /* Nonzero if the DSO is used in auditing */
  unsigned int l_audit_any_plt:1;   /* Nonzero if at least one audit module is interested in PLT interception */
  unsigned int l_removed:1;         /* Nonzero if the object cannot be used anymore because it has been removed */
  unsigned int l_contiguous:1;      /* Nonzero if inter-segment holes are mprotected or no holes are present at all */
  unsigned int l_symbolic_in_local_scope:1; /* Nonzero if l_local_scope during LD_TRACE_PRELINKING=1 contains any DT_SYMBOLIC libraries */
  /* Collected information about own RPATH directories. */
  struct r_search_path_struct l_rpath_dirs;
  /* Collected results of relocation while profiling. */
  struct reloc_result
  {
    DL_FIXUP_VALUE_TYPE addr;
    struct link_map *bound;
    unsigned int boundndx;
    uint32_t enterexit;
    unsigned int flags;
  } *l_reloc_result;
  /* Pointer to the version information, if available. */
  ElfW(Versym) *l_versyms;
  /* String specifying the path where this object was found. */
  const char *l_origin;
  /* Start and end of the memory map for this object; l_map_start
     need not be the same as l_addr. */
  ElfW(Addr) l_map_start, l_map_end;
  /* End of the executable part of the mapping. */
  ElfW(Addr) l_text_end;
  /* Default array for 'l_scope'. */
  struct r_scope_elem *l_scope_mem[4];
  /* Size of the array allocated for 'l_scope'. */
  size_t l_scope_max;
  /* This is an array defining the lookup scope for this link map.
     There are initially at most three different scope lists. */
  struct r_scope_elem **l_scope;
  /* A similar array, this time only with the local scope; used occasionally. */
  struct r_scope_elem *l_local_scope[2];
  /* Kept to check whether a shared object is the same as one already loaded. */
  dev_t l_dev;
  ino64_t l_ino;
  /* Collected information about own RUNPATH directories. */
  struct r_search_path_struct l_runpath_dirs;
  /* List of objects in the order of the init and fini calls. */
  struct link_map **l_initfini;
  /* The init and fini list generated at startup, saved when the
     object is also loaded dynamically. */
  struct link_map **l_orig_initfini;
  /* List of the dependencies introduced through symbol binding. */
  struct link_map_reldeps
  {
    unsigned int act;
    struct link_map *list[];
  } *l_reldeps;
  unsigned int l_reldepsmax;
  /* Nonzero if the DSO is used. */
  unsigned int l_used;
  /* Various flag words. */
  ElfW(Word) l_feature_1;
  ElfW(Word) l_flags_1;
  ElfW(Word) l_flags;
  /* Temporarily used in `dl_close'. */
  int l_idx;
  struct link_map_machine l_mach;
  struct
  {
    const ElfW(Sym) *sym;
    int type_class;
    struct link_map *value;
    const ElfW(Sym) *ret;
  } l_lookup_cache;
  /* Thread-local storage related info. */
  void *l_tls_initimage;            /* Start of the TLS initialization image */
  size_t l_tls_initimage_size;      /* Size of the TLS initialization image */
  size_t l_tls_blocksize;           /* Size of the TLS block */
  size_t l_tls_align;               /* Alignment requirement of the TLS block */
  size_t l_tls_firstbyte_offset;    /* Offset of the first byte relative to the module alignment */
#ifndef NO_TLS_OFFSET
# define NO_TLS_OFFSET 0
#endif
#ifndef FORCED_DYNAMIC_TLS_OFFSET
# if NO_TLS_OFFSET == 0
#  define FORCED_DYNAMIC_TLS_OFFSET 1
# elif NO_TLS_OFFSET == -1
#  define FORCED_DYNAMIC_TLS_OFFSET -2
# else
#  error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
# endif
#endif
  /* For objects present at startup time: offset in the static TLS block. */
  ptrdiff_t l_tls_offset;
  /* Index of the module in the dtv array. */
  size_t l_tls_modid;
  /* Information used to change permissions after the relocations are done. */
  ElfW(Addr) l_relro_addr;
  size_t l_relro_size;
  unsigned long long int l_serial;
  /* Audit information. This array must be the last member of the
     structure; never add anything after it. */
  struct auditstate
  {
    uintptr_t cookie;
    unsigned int bindflags;
  } l_audit[0];
};
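The first few members (l_addr, l_name, l_ld, l_next/l_prev) are the public "debugger protocol" part, so an ordinary program can walk the chain of loaded objects through them. A small sketch assuming Linux/glibc (dlinfo with RTLD_DI_LINKMAP; older glibc needs -ldl when linking):
#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>
#include <stdio.h>

int main (void)
{
  void *handle = dlopen (NULL, RTLD_NOW);   /* handle for the main program */
  struct link_map *l = NULL;
  if (handle == NULL || dlinfo (handle, RTLD_DI_LINKMAP, &l) != 0)
    return 1;
  while (l->l_prev != NULL)                 /* rewind to the head of the chain */
    l = l->l_prev;
  for (; l != NULL; l = l->l_next)          /* then print every loaded object */
    printf ("l_addr=%#14lx  %s\n", (unsigned long) l->l_addr,
            l->l_name[0] != '\0' ? l->l_name : "(main program)");
  return 0;
}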
Appendix
1. Full source of _dl_map_object_from_fd:
static
#endif
struct link_map *
_dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
char *realname, struct link_map *loader, int l_type,
int mode, void **stack_endp, Lmid_t nsid)
{
struct link_map *l = NULL;
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
int type;
struct stat64 st;
/* Initialize to keep the compiler happy.*/
const char *errstring = NULL;
int errval = 0;
struct r_debug *r = _dl_debug_initialize (0, nsid);
bool make_consistent = false;
/* Get file information.*/
if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
{
errstring = N_("cannot stat shared object");
call_lose_errno:
errval = errno;
call_lose:
lose (errval, fd, name, realname, l, errstring,
make_consistent ? r : NULL);
}
/* Look again to see if the real name matched another already loaded.*/
for (l = GL(dl_ns)[nsid]._ns_loaded;
l;
l = l->l_next)
if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it.*/
__close (fd);
/* If the name is not in the list of names for this object add
it.*/
free (realname);
add_name_to_object (l, name);
return l;
}
#ifdef SHARED
/* When loading into a namespace other than the base one we must
avoid loading ld.so since there can only be one copy.Ever.*/
if (__builtin_expect (nsid != LM_ID_BASE, 0)
&& ((st.st_ino == GL(dl_rtld_map).l_ino
&& st.st_dev == GL(dl_rtld_map).l_dev)
|| _dl_name_match_p (name, &GL(dl_rtld_map))))
{
/* This is indeed ld.so.Create a new link_map which refers to
the real one for almost everything.*/
l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
if (l == NULL)
goto fail_new;
/* Refer to the real descriptor.*/
l->l_real = &GL(dl_rtld_map);
/* No need to bump the refcount of the real object, ld.so will
never be unloaded.*/
__close (fd);
/* Add the map for the mirrored object to the object list.*/
_dl_add_to_namespace_list (l, nsid);
return l;
}
#endif
if (mode & RTLD_NOLOAD)
{
/* We are not supposed to load the object unless it is already
loaded.So return now.*/
free (realname);
__close (fd);
return NULL;
}
/* Print debugging message.*/
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
_dl_debug_printf ("file=%s [%lu];
generating link map\n", name, nsid);
/* This is the ELF header.We read it in `open_verify'.*/
header = (void *) fbp->buf;
#ifndef MAP_ANON
# define MAP_ANON 0
if (_dl_zerofd == -1)
{
_dl_zerofd = _dl_sysdep_open_zero_fill ();
if (_dl_zerofd == -1)
{
free (realname);
__close (fd);
_dl_signal_error (errno, NULL, NULL,
N_("cannot open zero fill device"));
}
}
#endif
/* Signal that we are going to add new objects.*/
if (r->r_state == RT_CONSISTENT)
{
#ifdef SHARED
/* Auditing checkpoint: we are going to add new objects.*/
if ((mode & __RTLD_AUDIT) == 0
&& __builtin_expect (GLRO(dl_naudit) > 0, 0))
{
struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
/* Do not call the functions for any auditing object.*/
if (head->l_auditing == 0)
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0;
cnt < GLRO(dl_naudit);
++cnt)
{
if (afct->activity != NULL)
afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
afct = afct->next;
}
}
}
#endif
/* Notify the debugger we have added some objects.We need to
call _dl_debug_initialize in a static program in case dynamic
linking has not been used before.*/
r->r_state = RT_ADD;
_dl_debug_state ();
make_consistent = true;
}
else
assert (r->r_state == RT_ADD);
/* Enter the new object in the list of loaded objects.*/
l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
if (__builtin_expect (l == NULL, 0))
{
#ifdef SHARED
fail_new:
#endif
errstring = N_("cannot create shared object descriptor");
goto call_lose_errno;
}
/* Extract the remaining details we need from the ELF header
and then read in the program header table.*/
l->l_entry = header->e_entry;
type = header->e_type;
l->l_phnum = header->e_phnum;
maplength = header->e_phnum * sizeof (ElfW(Phdr));
if (header->e_phoff + maplength <= (size_t) fbp->len)
phdr = (void *) (fbp->buf + header->e_phoff);
else
{
phdr = alloca (maplength);
__lseek (fd, header->e_phoff, SEEK_SET);
if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
{
errstring = N_("cannot read file data");
goto call_lose_errno;
}
}
/* On most platforms presume that PT_GNU_STACK is absent and the stack is
* executable.Other platforms default to a nonexecutable stack and don't
* need PT_GNU_STACK to do so.*/
uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;
{
/* Scan the program header table, collecting its load commands.*/
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
bool has_holes = false;
/* The struct is initialized to zero so this is not necessary:
l->l_ld = 0;
l->l_phdr = 0;
l->l_addr = 0;
*/
for (ph = phdr;
ph < &phdr[l->l_phnum];
++ph)
switch (ph->p_type)
{
/* These entries tell us where to find things once the file's
segments are mapped in.We record the addresses it says
verbatim, and later correct for the run-time load address.*/
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later.*/
if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
0))
{
errstring = N_("ELF load command alignment not page-aligned");
goto call_lose;
}
if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
& (ph->p_align - 1)) != 0, 0))
{
errstring
= N_("ELF load command address/offset not properly aligned");
goto call_lose;
}
c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
/* Determine whether there is a gap between the last segment
and this one.*/
if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
has_holes = true;
/* Optimize a common case.*/
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
c->prot = (PF_TO_PROT
>> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
#endif
break;
case PT_TLS:
if (ph->p_memsz == 0)
/* Nothing to do for an empty segment.*/
break;
l->l_tls_blocksize = ph->p_memsz;
l->l_tls_align = ph->p_align;
if (ph->p_align == 0)
l->l_tls_firstbyte_offset = 0;
else
l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
l->l_tls_initimage_size = ph->p_filesz;
/* Since we don't know the load address yet only store the
offset.We will adjust it later.*/
l->l_tls_initimage = (void *) ph->p_vaddr;
/* If not loading the initial set of shared libraries,
check whether we should permit loading a TLS segment.*/
if (__builtin_expect (l->l_type == lt_library, 1)
/* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
not set up TLS data structures, so don't use them now.*/
|| __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
{
/* Assign the next available module ID.*/
l->l_tls_modid = _dl_next_tls_modid ();
break;
}
#ifdef SHARED
if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
/* We are loading the executable itself when the dynamic linker
was executed directly.The setup will happen later.*/
break;
/* In a static binary there is no way to tell if we dynamically
loaded libpthread.*/
if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
#endif
{
/* We have not yet loaded libpthread.
We can do the TLS setup right now!*/
void *tcb;
/* The first call allocates TLS bookkeeping data structures.
Then we allocate the TCB for the initial thread.*/
if (__builtin_expect (_dl_tls_setup (), 0)
|| __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
0))
{
errval = ENOMEM;
errstring = N_("\
cannot allocate TLS data structures for initial thread");
goto call_lose;
}
/* Now we install the TCB in the thread register.*/
errstring = TLS_INIT_TP (tcb, 0);
if (__builtin_expect (errstring == NULL, 1))
{
/* Now we are all good.*/
l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
break;
}
/* The kernel is too old or somesuch.*/
errval = 0;
_dl_deallocate_tls (tcb, 1);
goto call_lose;
}
/* Uh-oh, the binary expects TLS support but we cannot
provide it.*/
errval = 0;
errstring = N_("cannot handle TLS data");
goto call_lose;
break;
case PT_GNU_STACK:
stack_flags = ph->p_flags;
break;
case PT_GNU_RELRO:
l->l_relro_addr = ph->p_vaddr;
l->l_relro_size = ph->p_memsz;
break;
}
if (__builtin_expect (nloadcmds == 0, 0))
{
/* This only happens for a bogus object that will be caught with
another error below.But we don't want to go through the
calculations below using NLOADCMDS - 1.*/
errstring = N_("object file has no loadable segments");
goto call_lose;
}
/* Now process the load commands and map segments into memory.*/
c = loadcmds;
/* Length of the sections to be loaded.*/
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
if (__builtin_expect (type, ET_DYN) == ET_DYN)
{
/* This is a position-independent shared object.We can let the
kernel map it anywhere it likes, but we must have space for all
the segments in their specified positions relative to the first.
So we map the first segment without MAP_FIXED, but with its
extent increased to cover all the segments.Then we remove
access from excess portion, and there is known sufficient space
there to remap from the later segments.As a refinement, sometimes we have an address that we would
prefer to map such objects at;
but this is only a preference,
the OS can do whatever it likes. */
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
c->mapstart & GLRO(dl_use_load_bias))
- MAP_BASE_ADDR (l));
/* Remember which part of the address space this object uses.*/
l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
c->prot,
MAP_COPY|MAP_FILE,
fd, c->mapoff);
if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
{
map_error:
errstring = N_("failed to map segment from shared object");
goto call_lose_errno;
}
l->l_map_end = l->l_map_start + maplength;
l->l_addr = l->l_map_start - c->mapstart;
if (has_holes)
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated.Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping.*/
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE);
l->l_contiguous = 1;
goto postmap;
}
/* This object is loaded at a fixed address.This must never
happen for objects loaded with dlopen().*/
if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
{
errstring = N_("cannot dynamically load executable");
goto call_lose;
}
/* Notify ELF_PREFERRED_ADDRESS that we have to load this one
fixed.*/
ELF_FIXED_ADDRESS (loader, c->mapstart);
/* Remember which part of the address space this object uses.*/
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
l->l_contiguous = !has_holes;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart
/* Map the segment contents from the file.*/
&& (__mmap ((void *) (l->l_addr + c->mapstart),
c->mapend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
== MAP_FAILED))
goto map_error;
postmap:
if (c->prot & PROT_EXEC)
l->l_text_end = l->l_addr + c->mapend;
if (l->l_phdr == 0
&& (ElfW(Off)) c->mapoff <= header->e_phoff
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment.*/
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file.*/
ElfW(Addr) zero, zeroend, zeropage;
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it.*/
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment.*/
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
{
/* Dag nab it.*/
if (__mprotect ((caddr_t) (zero
& ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
{
errstring = N_("cannot change memory protections");
goto call_lose_errno;
}
}
memset ((void *) zero, '\0', zeropage - zero);
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD.*/
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
-1, 0);
if (__builtin_expect (mapat == MAP_FAILED, 0))
{
errstring = N_("cannot map zero-fill pages");
goto call_lose_errno;
}
}
}
++c;
}
}
if (l->l_ld == 0)
{
if (__builtin_expect (type == ET_DYN, 0))
{
errstring = N_("object file has no dynamic section");
goto call_lose;
}
}
else
l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
elf_get_dynamic_info (l, NULL);
/* Make sure we are not dlopen'ing an object that has the
DF_1_NOOPEN flag set.*/
if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
&& (mode & __RTLD_DLOPEN))
{
/* We are not supposed to load this object.Free all resources.*/
__munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
if (!l->l_libname->dont_free)
free (l->l_libname);
if (l->l_phdr_allocated)
free ((void *) l->l_phdr);
errstring = N_("shared object cannot be dlopen()ed");
goto call_lose;
}
if (l->l_phdr == NULL)
{
/* The program header is not contained in any of the segments.
We have to allocate memory ourself and copy it over from out
temporary place.*/
ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
* sizeof (ElfW(Phdr)));
if (newp == NULL)
{
errstring = N_("cannot allocate memory for program header");
goto call_lose_errno;
}
l->l_phdr = memcpy (newp, phdr,
(header->e_phnum * sizeof (ElfW(Phdr))));
l->l_phdr_allocated = 1;
}
else
/* Adjust the PT_PHDR value by the runtime load address.*/
l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
{
if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
0) != 0)
{
errstring = N_("invalid caller");
goto call_lose;
}
/* The stack is presently not executable, but this module
requires that it be executable.We must change the
protection of the variable which contains the flags used in
the mprotect calls.*/
#ifdef SHARED
if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
{
const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
struct link_map *const m = &GL(dl_rtld_map);
const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
+ m->l_relro_size)
& -GLRO(dl_pagesize));
if (__builtin_expect (p + s <= relro_end, 1))
{
/* The variable lies in the region protected by RELRO.*/
__mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
__mprotect ((void *) p, s, PROT_READ);
}
else
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
}
else
#endif
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
#ifdef check_consistency
check_consistency ();
#endif
errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
if (errval)
{
errstring = N_("\
cannot enable executable stack as shared object requires");
goto call_lose;
}
}
/* Adjust the address of the TLS initialization image.*/
if (l->l_tls_initimage != NULL)
l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
/* We are done mapping in the file.We no longer need the descriptor.*/
if (__builtin_expect (__close (fd) != 0, 0))
{
errstring = N_("cannot close file descriptor");
goto call_lose_errno;
}
/* Signal that we closed the file.*/
fd = -1;
if (l->l_type == lt_library && type == ET_EXEC)
l->l_type = lt_executable;
l->l_entry += l->l_addr;
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
_dl_debug_printf ("\
dynamic: 0x%0*lxbase: 0x%0*lxsize: 0x%0*Zx\n\
entry: 0x%0*lxphdr: 0x%0*lxphnum:%*u\n\n",
(int) sizeof (void *) * 2,
(unsigned long int) l->l_ld,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_addr,
(int) sizeof (void *) * 2, maplength,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_entry,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_phdr,
(int) sizeof (void *) * 2, l->l_phnum);
/* Set up the symbol hash table.*/
_dl_setup_hash (l);
/* If this object has DT_SYMBOLIC set modify now its scope.We don't
have to do this for the main map.*/
if ((mode & RTLD_DEEPBIND) == 0
&& __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
&& &l->l_searchlist != l->l_scope[0])
{
/* Create an appropriate searchlist.It contains only this map.
This is the definition of DT_SYMBOLIC in SysVr4.*/
l->l_symbolic_searchlist.r_list[0] = l;
l->l_symbolic_searchlist.r_nlist = 1;
/* Now move the existing entries one back.*/
memmove (&l->l_scope[1], &l->l_scope[0],
(l->l_scope_max - 1) * sizeof (l->l_scope[0]));
/* Now add the new entry.*/
l->l_scope[0] = &l->l_symbolic_searchlist;
}
/* Remember whether this object must be initialized first.*/
if (l->l_flags_1 & DF_1_INITFIRST)
GL(dl_initfirst) = l;
/* Finally the file information.*/
l->l_dev = st.st_dev;
l->l_ino = st.st_ino;
/* When we profile the SONAME might be needed for something else but
loading.Add it right away.*/
if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
&& l->l_info[DT_SONAME] != NULL)
add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
+ l->l_info[DT_SONAME]->d_un.d_val));
/* Now that the object is fully initialized add it to the object list.*/
_dl_add_to_namespace_list (l, nsid);
#ifdef SHARED
/* Auditing checkpoint: we have a new object.*/
if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
&& !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0;
cnt < GLRO(dl_naudit);
++cnt)
{
if (afct->objopen != NULL)
{
l->l_audit[cnt].bindflags
= afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
}
afct = afct->next;
}
}
#endif
return l;
}
2. link_map
struct link_map
{
/* These first few members are part of the protocol with the debugger.
This is the same format used in SVR4.*/
ElfW(Addr) l_addr;
/* Base address shared object is loaded at.*/
char *l_name;
/* Absolute file name object was found in.*/
ElfW(Dyn) *l_ld;
/* Dynamic section of the shared object.*/
struct link_map *l_next, *l_prev;
/* Chain of loaded objects.*/
/* All following members are internal to the dynamic linker.
They may change without notice.*/
/* This is an element which is only ever different from a pointer to
the very same copy of this type for ld.so when it is used in more
than one namespace.*/
struct link_map *l_real;
/* Number of the namespace this link map belongs to.*/
Lmid_t l_ns;
struct libname_list *l_libname;
/* Indexed pointers to dynamic section.
[0,DT_NUM) are indexed by the processor-independent tags.
[DT_NUM,DT_NUM+DT_THISPROCNUM) are indexed by the tag minus DT_LOPROC.
[DT_NUM+DT_THISPROCNUM,DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM) are
indexed by DT_VERSIONTAGIDX(tagvalue).
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM) are indexed by
DT_EXTRATAGIDX(tagvalue).
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM) are
indexed by DT_VALTAGIDX(tagvalue) and
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM+DT_ADDRNUM)
are indexed by DT_ADDRTAGIDX(tagvalue), see .*/
ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
+ DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
const ElfW(Phdr) *l_phdr;
/* Pointer to program header table in core.*/
ElfW(Addr) l_entry;
/* Entry point location.*/
ElfW(Half) l_phnum;
/* Number of program header entries.*/
ElfW(Half) l_ldnum;
/* Number of dynamic segment entries.*/
/* Array of DT_NEEDED dependencies and their dependencies, in
dependency order for symbol lookup (with and without
duplicates).There is no entry before the dependencies have
been loaded.*/
struct r_scope_elem l_searchlist;
/* We need a special searchlist to process objects marked with
DT_SYMBOLIC.*/
struct r_scope_elem l_symbolic_searchlist;
/* Dependent object that first caused this object to be loaded.*/
struct link_map *l_loader;
/* Array with version names.*/
struct r_found_version *l_versions;
unsigned int l_nversions;
/* Symbol hash table.*/
Elf_Symndx l_nbuckets;
Elf32_Word l_gnu_bitmask_idxbits;
Elf32_Word l_gnu_shift;
const ElfW(Addr) *l_gnu_bitmask;
union
{
const Elf32_Word *l_gnu_buckets;
const Elf_Symndx *l_chain;
};
union
{
const Elf32_Word *l_gnu_chain_zero;
const Elf_Symndx *l_buckets;
};
unsigned int l_direct_opencount;
/* Reference count for dlopen/dlclose.*/
enum/* Where this object came from.*/
{
lt_executable,/* The main executable program.*/
lt_library,/* Library needed by main executable.*/
lt_loaded/* Extra run-time loaded shared object.*/
} l_type:2;
unsigned int l_relocated:1;
/* Nonzero if object's relocations done.*/
unsigned int l_init_called:1;
/* Nonzero if DT_INIT function called.*/
unsigned int l_global:1;
/* Nonzero if object in _dl_global_scope.*/
unsigned int l_reserved:2;
/* Reserved for internal use.*/
unsigned int l_phdr_allocated:1;
/* Nonzero if the data structure pointed
to by `l_phdr' is allocated.*/
unsigned int l_soname_added:1;
/* Nonzero if the SONAME is for sure in
the l_libname list.*/
unsigned int l_faked:1;
/* Nonzero if this is a faked descriptor
without associated file.*/
unsigned int l_need_tls_init:1;
/* Nonzero if GL(dl_init_static_tls)
should be called on this link map
when relocation finishes.*/
unsigned int l_auditing:1;
/* Nonzero if the DSO is used in auditing.*/
unsigned int l_audit_any_plt:1;
/* Nonzero if at least one audit module
is interested in the PLT interception.*/
unsigned int l_removed:1;
/* Nozero if the object cannot be used anymore
since it is removed.*/
unsigned int l_contiguous:1;
/* Nonzero if inter-segment holes are
mprotected or if no holes are present at
all.*/
unsigned int l_symbolic_in_local_scope:1;
/* Nonzero if l_local_scope
during LD_TRACE_PRELINKING=1
contains any DT_SYMBOLIC
libraries.*/
/* Collected information about own RPATH directories.*/
struct r_search_path_struct l_rpath_dirs;
/* Collected results of relocation while profiling.*/
struct reloc_result
{
DL_FIXUP_VALUE_TYPE addr;
struct link_map *bound;
unsigned int boundndx;
uint32_t enterexit;
unsigned int flags;
} *l_reloc_result;
/* Pointer to the version information if available.*/
ElfW(Versym) *l_versyms;
/* String specifying the path where this object was found.*/
const char *l_origin;
/* Start and finish of memory map for this object.l_map_start
need not be the same as l_addr.*/
ElfW(Addr) l_map_start, l_map_end;
/* End of the executable part of the mapping.*/
ElfW(Addr) l_text_end;
/* Default array for 'l_scope'.*/
struct r_scope_elem *l_scope_mem[4];
/* Size of array allocated for 'l_scope'.*/
size_t l_scope_max;
/* This is an array defining the lookup scope for this link map.
There are initially at most three different scope lists.*/
struct r_scope_elem **l_scope;
/* A similar array, this time only with the local scope.This is
used occasionally.*/
struct r_scope_elem *l_local_scope[2];
/* This information is kept to check for sure whether a shared
object is the same as one already loaded.*/
dev_t l_dev;
ino64_t l_ino;
/* Collected information about own RUNPATH directories.*/
struct r_search_path_struct l_runpath_dirs;
/* List of object in order of the init and fini calls.*/
struct link_map **l_initfini;
/* The init and fini list generated at startup, saved when the
object is also loaded dynamically.*/
struct link_map **l_orig_initfini;
/* List of the dependencies introduced through symbol binding.*/
struct link_map_reldeps
{
unsigned int act;
struct link_map *list[];
} *l_reldeps;
unsigned int l_reldepsmax;
/* Nonzero if the DSO is used.*/
unsigned int l_used;
/* Various flag words.*/
ElfW(Word) l_feature_1;
ElfW(Word) l_flags_1;
ElfW(Word) l_flags;
/* Temporarily used in `dl_close'.*/
int l_idx;
struct link_map_machine l_mach;
struct
{
const ElfW(Sym) *sym;
int type_class;
struct link_map *value;
const ElfW(Sym) *ret;
} l_lookup_cache;
/* Thread-local storage related info.*/
/* Start of the initialization image.*/
void *l_tls_initimage;
/* Size of the initialization image.*/
size_t l_tls_initimage_size;
/* Size of the TLS block.*/
size_t l_tls_blocksize;
/* Alignment requirement of the TLS block.*/
size_t l_tls_align;
/* Offset of first byte module alignment.*/
size_t l_tls_firstbyte_offset;
#ifndef NO_TLS_OFFSET
# define NO_TLS_OFFSET 0
#endif
#ifndef FORCED_DYNAMIC_TLS_OFFSET
# if NO_TLS_OFFSET == 0
#define FORCED_DYNAMIC_TLS_OFFSET 1
# elif NO_TLS_OFFSET == -1
#define FORCED_DYNAMIC_TLS_OFFSET -2
# else
#error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
# endif
#endif
/* For objects present at startup time: offset in the static TLS block.*/
ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array.*/
size_t l_tls_modid;
/* Information used to change permission after the relocations are
done.*/
ElfW(Addr) l_relro_addr;
size_t l_relro_size;
unsigned long long int l_serial;
/* Audit information.This array apparent must be the last in the
structure.Never add something after it.*/
struct auditstate
{
uintptr_t cookie;
unsigned int bindflags;
} l_audit[0];
};