Reading binfmt_elf.c

  • ET_DYN: shared objects — shared libraries, PIE executables, and the dynamic loader itself
  • ET_EXEC: executables linked at a fixed address (non-PIE), loaded with no load bias (see the sketch below)
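
You can tell which kind a given binary is from e_type in the ELF header. A minimal sketch that reads it directly (64-bit ELF assumed, most validation skipped):

  #include <elf.h>
  #include <stdio.h>
  #include <string.h>

  int main(int argc, char **argv)
  {
  	Elf64_Ehdr eh;
  	FILE *f;

  	if (argc < 2)
  		return 1;
  	f = fopen(argv[1], "rb");
  	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
  		perror(argv[1]);
  		return 1;
  	}
  	if (memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0) {
  		fprintf(stderr, "not an ELF file\n");
  		return 1;
  	}
  	switch (eh.e_type) {
  	case ET_EXEC: puts("ET_EXEC (fixed-address executable)"); break;
  	case ET_DYN:  puts("ET_DYN (shared object / PIE)");       break;
  	default:      printf("e_type = %u\n", (unsigned)eh.e_type); break;
  	}
  	fclose(f);
  	return 0;
  }

readelf -h reports the same field as "Type:"; on modern distros most /usr/bin binaries show DYN because they are built as PIE.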

arch/x86/include/asm/elf.h

   /*
    * This is the base location for PIE (ET_DYN with INTERP) loads. On
    * 64-bit, this is above 4GB to leave the entire 32-bit address
    * space open for things that want to use the area for 32-bit pointers.
    */
  #define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
  						  (DEFAULT_MAP_WINDOW / 3 * 2))
...

fs/binfmt_elf.c (load_elf_binary)

  	if (interpreter) {
  		load_bias = ELF_ET_DYN_BASE;
  		if (current->flags & PF_RANDOMIZE)
  			load_bias += arch_mmap_rnd();
  		elf_flags |= elf_fixed;
  	} else
  		load_bias = 0;
...
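
On 64-bit x86 with 4-level paging, DEFAULT_MAP_WINDOW is (1UL << 47) - PAGE_SIZE, so ELF_ET_DYN_BASE works out to roughly two thirds of the user address space. A quick userspace re-computation of that arithmetic (constants assumed from arch/x86, no randomization):

  #include <stdio.h>

  int main(void)
  {
  	unsigned long page_size   = 0x1000UL;                  /* 4 KiB pages        */
  	unsigned long map_window  = (1UL << 47) - page_size;   /* DEFAULT_MAP_WINDOW */
  	unsigned long et_dyn_base = map_window / 3 * 2;        /* ELF_ET_DYN_BASE    */

  	printf("ELF_ET_DYN_BASE = %#lx\n", et_dyn_base);
  	/* load_elf_binary later page-aligns load_bias (ELF_PAGESTART),
  	 * which yields the PIE base familiar from gdb with ASLR off: */
  	printf("page-aligned    = %#lx\n", et_dyn_base & ~(page_size - 1));
  	return 0;
  }

This prints 0x555555554aaa and 0x555555554000. With PF_RANDOMIZE set, arch_mmap_rnd() adds a page-aligned random offset on top of that base.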

  static unsigned long elf_map(struct file *filep, unsigned long addr,
  		const struct elf_phdr *eppnt, int prot, int type,
  		unsigned long total_size)
  {
  	...(omitted)...
  	/*
  	 * total_size is the size of the ELF (interpreter) image.
  	 * The _first_ mmap needs to know the full size, otherwise
  	 * randomization might put this image into an overlapping
  	 * position with the ELF binary image. (since size < total_size)
  	 * So we first map the 'big' image - and unmap the remainder at
  	 * the end. (which unmap is needed for ELF images with holes.)
  	 */
  	if (total_size) {
  		total_size = ELF_PAGEALIGN(total_size);
  		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
  		if (!BAD_ADDR(map_addr))
  			vm_munmap(map_addr+size, total_size-size);
  	} else
  		map_addr = vm_mmap(filep, addr, size, prot, type, off);

    ...
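
The comment above describes the map-big-then-trim pattern: reserve the full extent in one mmap so a randomized base cannot end up overlapping the main binary, then drop the part that is not needed. The same shape in userspace (sizes made up for illustration):

  #include <stdio.h>
  #include <sys/mman.h>

  int main(void)
  {
  	size_t total_size = 16 * 4096;	/* full extent of the image (example value) */
  	size_t size       = 4 * 4096;	/* part we actually want to keep (example)  */

  	/* Reserve the whole range in one go so nothing else can land inside it. */
  	void *base = mmap(NULL, total_size, PROT_READ | PROT_WRITE,
  			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	if (base == MAP_FAILED) {
  		perror("mmap");
  		return 1;
  	}

  	/* Drop the remainder, keeping only the first 'size' bytes,
  	 * the same shape as the vm_mmap() + vm_munmap() pair above. */
  	if (munmap((char *)base + size, total_size - size) != 0) {
  		perror("munmap");
  		return 1;
  	}

  	printf("kept [%p, %p)\n", base, (void *)((char *)base + size));
  	return 0;
  }

load_elf_interp() (called in the next excerpt) goes through this same elf_map() path, with total_size set to the interpreter's full mapping size.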
	 
  	if (interpreter) {
  		unsigned long interp_map_addr = 0;
  
  		elf_entry = load_elf_interp(&loc->interp_elf_ex,
  					    interpreter,
  					    &interp_map_addr,
  					    load_bias, interp_elf_phdata);
...
  static int
  create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
  		unsigned long load_addr, unsigned long interp_load_addr)
  {
  	...(omitted)...
  	/*
  	 * Generate 16 random bytes for userspace PRNG seeding.
  	 */
  	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
  	u_rand_bytes = (elf_addr_t __user *)
  		       STACK_ALLOC(p, sizeof(k_rand_bytes));
  	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
  		return -EFAULT;
  
  	/* Create the ELF interpreter info */
  	elf_info = (elf_addr_t *)current->mm->saved_auxv;
  	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
  #define NEW_AUX_ENT(id, val) \
  	do { \
  		elf_info[ei_index++] = id; \
  		elf_info[ei_index++] = val; \
  	} while (0)
  
  #ifdef ARCH_DLINFO
  	/* 
  	 * ARCH_DLINFO must come first so PPC can do its special alignment of
  	 * AUXV.
  	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
  	 * ARCH_DLINFO changes
  	 */
  	ARCH_DLINFO;
  #endif
  	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
  	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
  	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
  	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
  	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
  	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
  	NEW_AUX_ENT(AT_BASE, interp_load_addr);
  	NEW_AUX_ENT(AT_FLAGS, 0);
  	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
  	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
  	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
  	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
  	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
  	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
  	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
  #ifdef ELF_HWCAP2
  	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
  #endif
  	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
  	if (k_platform) {
  		NEW_AUX_ENT(AT_PLATFORM,
  			    (elf_addr_t)(unsigned long)u_platform);
  	}
  	if (k_base_platform) {
  		NEW_AUX_ENT(AT_BASE_PLATFORM,
  			    (elf_addr_t)(unsigned long)u_base_platform);
  	}
  	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
  		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
  	}
  #undef NEW_AUX_ENT
  	/* AT_NULL is zero; clear the rest too */
  	memset(&elf_info[ei_index], 0,
  	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
  
  	/* And advance past the AT_NULL entry.  */
  	ei_index += 2;
  
  	sp = STACK_ADD(p, ei_index);
  
  	items = (argc + 1) + (envc + 1) + 1;
  	bprm->p = STACK_ROUND(sp, items);
  
  	/* Point sp at the lowest address on the stack */
  #ifdef CONFIG_STACK_GROWSUP
  	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
  	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
  #else
  	sp = (elf_addr_t __user *)bprm->p;
  #endif
  
  	...(omitted)...
  
  	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
  	if (__put_user(argc, sp++))
  		return -EFAULT;
  		
  
  	/* Populate list of argv pointers back to argv strings. */
  	p = current->mm->arg_end = current->mm->arg_start;
  	while (argc-- > 0) {
  		size_t len;
  		if (__put_user((elf_addr_t)p, sp++))
  			return -EFAULT;
  		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
  		if (!len || len > MAX_ARG_STRLEN)
  			return -EINVAL;
  		p += len;
  	}
  	if (__put_user(0, sp++))
  		return -EFAULT;
  	current->mm->arg_end = p;
  
  	/* Populate list of envp pointers back to envp strings. */
  	current->mm->env_end = current->mm->env_start = p;
  	while (envc-- > 0) {
  		size_t len;
  		if (__put_user((elf_addr_t)p, sp++))
  			return -EFAULT;
  		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
  		if (!len || len > MAX_ARG_STRLEN)
  			return -EINVAL;
  		p += len;
  	}
  	if (__put_user(0, sp++))
  		return -EFAULT;
  	current->mm->env_end = p;
  
  	/* Put the elf_info on the stack in the right place.  */
  	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
  		return -EFAULT;
  	return 0;
  }
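
Everything create_elf_tables lays out stays visible to the process: on the initial stack, argc is followed by argv[], a NULL, envp[], another NULL, and then the auxv pairs ending at AT_NULL. A sketch that walks there from environ and prints a few entries (glibc on x86-64 assumed; only reliable before anything modifies the environment):

  #include <elf.h>
  #include <stdio.h>

  extern char **environ;

  int main(void)
  {
  	char **p = environ;

  	/* Skip over envp[]; the auxv array starts right after its NULL terminator. */
  	while (*p)
  		p++;
  	p++;

  	for (Elf64_auxv_t *aux = (Elf64_auxv_t *)p; aux->a_type != AT_NULL; aux++) {
  		switch (aux->a_type) {
  		case AT_PHDR:   printf("AT_PHDR   = %#lx\n", aux->a_un.a_val); break;
  		case AT_ENTRY:  printf("AT_ENTRY  = %#lx\n", aux->a_un.a_val); break;
  		case AT_BASE:   printf("AT_BASE   = %#lx\n", aux->a_un.a_val); break;
  		case AT_RANDOM: printf("AT_RANDOM = %#lx\n", aux->a_un.a_val); break;
  		}
  	}
  	return 0;
  }

AT_BASE is interp_load_addr (where the dynamic loader was mapped), AT_PHDR points at the main binary's program headers at load_addr + e_phoff, and AT_RANDOM points at the 16 bytes filled by get_random_bytes() above; getauxval(3) from <sys/auxv.h> returns the same values without walking the stack.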
  

	
