/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/grsecurity.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
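/*
 * For illustration: with BITS_PER_LONG == 64, INIT_OFFSET_MASK is 1UL << 63.
 * The section-layout code stores an init section's offset as
 * (offset | INIT_OFFSET_MASK) and strips the bit again when moving the
 * module, so the top bit serves purely as an "init section" flag and never
 * collides with a real offset.
 */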
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->init_layout.mtn_rw))
		return (unsigned long)mod->init_layout.base_rw;
	if (unlikely(mtn == &mod->init_layout.mtn_rx))
		return (unsigned long)mod->init_layout.base_rx;

	if (unlikely(mtn == &mod->core_layout.mtn_rw))
		return (unsigned long)mod->core_layout.base_rw;

	return (unsigned long)mod->core_layout.base_rx;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->init_layout.mtn_rw))
		return (unsigned long)mod->init_layout.size_rw;
	if (unlikely(mtn == &mod->init_layout.mtn_rx))
		return (unsigned long)mod->init_layout.size_rx;

	if (unlikely(mtn == &mod->core_layout.mtn_rw))
		return (unsigned long)mod->core_layout.size_rw;

	return (unsigned long)mod->core_layout.size_rx;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};
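/*
 * Sketch of how these ops are consumed (per the latch_tree API in
 * <linux/rbtree_latch.h>): latch_tree_find() returns the node for which
 * mod_tree_comp() yields 0, i.e. the layout region whose
 * [start, start + size) range contains the key address:
 *
 *	struct latch_tree_node *ltn =
 *		latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
 */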
static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min_rw;
	unsigned long addr_min_rx;
	unsigned long addr_max_rw;
	unsigned long addr_max_rx;
} mod_tree __cacheline_aligned = {
	.addr_min_rw = -1UL,
	.addr_min_rx = -1UL,
};

#define module_addr_min_rw mod_tree.addr_min_rw
#define module_addr_min_rx mod_tree.addr_min_rx
#define module_addr_max_rw mod_tree.addr_max_rw
#define module_addr_max_rx mod_tree.addr_max_rx

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}
/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->core_layout.mtn_rx.mod = mod;
	mod->core_layout.mtn_rw.mod = mod;
	mod->init_layout.mtn_rx.mod = mod;
	mod->init_layout.mtn_rw.mod = mod;

	__mod_tree_insert(&mod->core_layout.mtn_rx);
	__mod_tree_insert(&mod->core_layout.mtn_rw);
	if (mod->init_layout.size_rx)
		__mod_tree_insert(&mod->init_layout.mtn_rx);
	if (mod->init_layout.size_rw)
		__mod_tree_insert(&mod->init_layout.mtn_rw);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_layout.size_rx)
		__mod_tree_remove(&mod->init_layout.mtn_rx);
	if (mod->init_layout.size_rw)
		__mod_tree_remove(&mod->init_layout.mtn_rw);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->core_layout.mtn_rx);
	__mod_tree_remove(&mod->core_layout.mtn_rw);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}
#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */
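/*
 * Either way, mod_find() has the same contract: given an address inside any
 * of a module's rx/rw regions, return the owning module, or NULL. The
 * latched RB-tree variant is O(log n) and usable from NMI context; this
 * fallback is a plain RCU-protected linear walk of the module list.
 */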
/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds_rx(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min_rx)
		module_addr_min_rx = min;
	if (max > module_addr_max_rx)
		module_addr_max_rx = max;
}

static void __mod_update_bounds_rw(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min_rw)
		module_addr_min_rw = min;
	if (max > module_addr_max_rw)
		module_addr_max_rw = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds_rx(mod->core_layout.base_rx, mod->core_layout.size_rx);
	__mod_update_bounds_rw(mod->core_layout.base_rw, mod->core_layout.size_rw);
	if (mod->init_layout.size_rx)
		__mod_update_bounds_rx(mod->init_layout.base_rx, mod->init_layout.size_rx);
	if (mod->init_layout.size_rw)
		__mod_update_bounds_rw(mod->init_layout.base_rw, mod->init_layout.size_rw);
}
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */

/* Block module loading/unloading? */
int modules_disabled __read_only = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
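/*
 * Usage sketch for the notifier API above (hypothetical client code, shown
 * for context): a subsystem interested in module lifecycle events registers
 * a callback that receives the new state and the module:
 *
 *	static int my_module_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *		// action is MODULE_STATE_COMING, _LIVE or _GOING
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_module_cb };
 *	register_module_notifier(&my_nb);
 */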
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
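/*
 * Example of the section_objs() pattern as used later during load: one call
 * yields both the array base and the element count, e.g.
 *
 *	mod->syms = section_objs(info, "__ksymtab",
 *				 sizeof(*mod->syms), &mod->num_syms);
 */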
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch modarr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
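/*
 * The bsearch() in find_symbol_in_section() relies on each export table
 * being sorted by symbol name; the build places every exported symbol in
 * its own __ksymtab subsection and the linker sorts them, so cmp_name()
 * can drive a binary search instead of a linear scan.
 */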
/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	module_assert_mutex();
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align - 1 >= PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)              \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                 \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];
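/*
 * For reference, MODINFO_ATTR(version) above expands to
 * setup_modinfo_version(), show_modinfo_version(), modinfo_version_exists(),
 * free_modinfo_version() and a struct module_attribute named
 * modinfo_version, which surfaces the modinfo string as
 * /sys/module/<name>/version.
 */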
#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
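/*
 * Worked example of the refcount scheme: a live module with no users sits at
 * refcnt == MODULE_REF_BASE (1), so module_refcount() reports 0.
 * try_release_module_ref() subtracts MODULE_REF_BASE; a non-zero result
 * means someone still holds a reference, in which case atomic_add_unless()
 * restores the base count, unless the module already dropped to zero and
 * is going away.
 */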
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
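/*
 * Typical caller pattern for the get/put API above (standard kernel usage,
 * shown for context):
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;		// owner is initializing or unloading
 *	...use code or data belonging to owner...
 *	module_put(owner);
 */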
static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
		buf[l++] = 'E';
	if (mod->taints & (1 << TAINT_LIVEPATCH))
		buf[l++] = 'K';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_layout.size_rx + mk->mod->core_layout.size_rw);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_layout.size_rx + mk->mod->init_layout.size_rw);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
#ifdef CONFIG_MODVERSIONS

/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
	/*
	 * avoid potentially printing gibberish on attempted load
	 * of a module randomized with a different seed
	 */
	pr_warn_once("no symbol version for %s\n", symname);
#else
	pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
#endif
	return 1;

bad_version:
#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
	/*
	 * avoid potentially printing gibberish on attempted load
	 * of a module randomized with a different seed
	 */
	pr_warn("attempted module disagrees about version of symbol %s\n",
		symname);
#else
	pr_warn("%s: disagrees about version of symbol %s\n",
		mod->name, symname);
#endif
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
			 &crc, true, false)) {
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(sechdrs, versindex,
			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
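/*
 * Example of the same_magic() comparison, assuming typical vermagic strings
 * such as "4.9.0 SMP mod_unload " vs "4.9.1 SMP mod_unload ": with CRCs
 * present, strcspn() skips the leading kernel version so only the feature
 * flags must match; without CRCs the whole string, version included, must
 * be identical.
 */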
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)

static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr {
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	bin_attribute_no_const *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *)info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */
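/*
 * Allocation-layout note for add_sect_attrs() above: a single kzalloc()
 * carries both arrays. size[0] covers the module_sect_attrs header plus
 * nloaded module_sect_attr entries (aligned for the pointer array that
 * follows), and size[1] covers the NULL-terminated struct attribute *
 * vector that grp.attrs points at.
 */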
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	module_attribute_no_const *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test || attr->test(mod)) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
					&temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}

static void init_param_lock(struct module *mod)
{
	mutex_init(&mod->param_lock);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

static void init_param_lock(struct module *mod)
{
}
#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 *
 * General layout of module is:
 *          [text] [read-only-data] [ro-after-init] [writable data]
 * text_size -----^                ^               ^               ^
 * ro_size ------------------------|               |               |
 * ro_after_init_size -----------------------------|               |
 * size -----------------------------------------------------------|
 *
 * These values are always page-aligned (as is base)
 */
static void frob_text(const struct module_layout *layout,
		      int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->size_rx & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base_rx,
		   layout->size_rx >> PAGE_SHIFT);
}

static void frob_rodata(const struct module_layout *layout,
			int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->size_rx & (PAGE_SIZE-1));
//	BUG_ON((unsigned long)layout->size_ro & (PAGE_SIZE-1));
//	set_memory((unsigned long)layout->base_rx + layout->size_rx,
//		   (layout->size_ro - layout->size_rx) >> PAGE_SHIFT);
}

static void frob_ro_after_init(const struct module_layout *layout,
		int (*set_memory)(unsigned long start, int num_pages))
{
#if 0
	BUG_ON((unsigned long)layout->base_rx & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base + layout->ro_size,
		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
#endif
}

static void frob_writable_data(const struct module_layout *layout,
			       int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base_rw & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->size_rw & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base_rw, layout->size_rw >> PAGE_SHIFT);
}

/* livepatching wants to disable read-only so it can frob module. */
void module_disable_ro(const struct module *mod)
{
	frob_text(&mod->core_layout, set_memory_rw);
	frob_rodata(&mod->core_layout, set_memory_rw);
	frob_ro_after_init(&mod->core_layout, set_memory_rw);
	frob_text(&mod->init_layout, set_memory_rw);
	frob_rodata(&mod->init_layout, set_memory_rw);
}

void module_enable_ro(const struct module *mod, bool after_init)
{
	frob_text(&mod->core_layout, set_memory_ro);
	frob_rodata(&mod->core_layout, set_memory_ro);
	frob_text(&mod->init_layout, set_memory_ro);
	frob_rodata(&mod->init_layout, set_memory_ro);

	if (after_init)
		frob_ro_after_init(&mod->core_layout, set_memory_ro);
}

static void module_enable_nx(const struct module *mod)
{
	frob_rodata(&mod->core_layout, set_memory_nx);
	frob_ro_after_init(&mod->core_layout, set_memory_nx);
	frob_writable_data(&mod->core_layout, set_memory_nx);
	frob_rodata(&mod->init_layout, set_memory_nx);
	frob_writable_data(&mod->init_layout, set_memory_nx);
}

static void module_disable_nx(const struct module *mod)
{
	frob_rodata(&mod->core_layout, set_memory_x);
	frob_ro_after_init(&mod->core_layout, set_memory_x);
	frob_writable_data(&mod->core_layout, set_memory_x);
	frob_rodata(&mod->init_layout, set_memory_x);
	frob_writable_data(&mod->init_layout, set_memory_x);
}

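/*
 * Call-order note (summary of behaviour visible in this file): the two-phase
 * module_enable_ro() exists so that module_enable_ro(mod, false) can run
 * early in the load path while .data..ro_after_init is still writable for
 * the module's init code; do_init_module() later calls
 * module_enable_ro(mod, true) to seal that range once init has finished.
 */
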
/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		frob_text(&mod->core_layout, set_memory_rw);
		frob_text(&mod->init_layout, set_memory_rw);
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		frob_text(&mod->core_layout, set_memory_ro);
		frob_text(&mod->init_layout, set_memory_ro);
	}
	mutex_unlock(&module_mutex);
}

static void disable_ro_nx(const struct module_layout *layout)
{
	frob_text(layout, set_memory_rw);
	frob_rodata(layout, set_memory_rw);
	frob_rodata(layout, set_memory_x);
	frob_ro_after_init(layout, set_memory_rw);
	frob_ro_after_init(layout, set_memory_x);
	frob_writable_data(layout, set_memory_x);
}

#else
static void disable_ro_nx(const struct module_layout *layout)
{
#ifdef CONFIG_PAX_KERNEXEC
	set_memory_nx((unsigned long)layout->base_rx, PFN_UP(layout->size_rx));
	set_memory_rw((unsigned long)layout->base_rx, PFN_UP(layout->size_rx));
#endif
}

static void module_enable_nx(const struct module *mod) { }
static void module_disable_nx(const struct module *mod) { }
#endif

#ifdef CONFIG_LIVEPATCH
/*
 * Persist Elf information about a module. Copy the Elf header,
 * section header table, section string table, and symtab section
 * index from info to mod->klp_info.
 */
static int copy_module_elf(struct module *mod, struct load_info *info)
{
	unsigned int size, symndx;
	int ret;

	size = sizeof(*mod->klp_info);
	mod->klp_info = kmalloc(size, GFP_KERNEL);
	if (mod->klp_info == NULL)
		return -ENOMEM;

	/* Elf header */
	size = sizeof(mod->klp_info->hdr);
	memcpy(&mod->klp_info->hdr, info->hdr, size);

	/* Elf section header table */
	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
	mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
	if (mod->klp_info->sechdrs == NULL) {
		ret = -ENOMEM;
		goto free_info;
	}
	memcpy(mod->klp_info->sechdrs, info->sechdrs, size);

	/* Elf section name string table */
	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
	mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
	if (mod->klp_info->secstrings == NULL) {
		ret = -ENOMEM;
		goto free_sechdrs;
	}
	memcpy(mod->klp_info->secstrings, info->secstrings, size);

	/* Elf symbol section index */
	symndx = info->index.sym;
	mod->klp_info->symndx = symndx;

	/*
	 * For livepatch modules, core_kallsyms.symtab is a complete
	 * copy of the original symbol table. Adjust sh_addr to point
	 * to core_kallsyms.symtab since the copy of the symtab in module
	 * init memory is freed at the end of do_init_module().
	 */
	mod->klp_info->sechdrs[symndx].sh_addr =
		(unsigned long) mod->core_kallsyms.symtab;

	return 0;

free_sechdrs:
	kfree(mod->klp_info->sechdrs);
free_info:
	kfree(mod->klp_info);
	return ret;
}

static void free_module_elf(struct module *mod)
{
	kfree(mod->klp_info->sechdrs);
	kfree(mod->klp_info->secstrings);
	kfree(mod->klp_info);
}
#else /* !CONFIG_LIVEPATCH */
static int copy_module_elf(struct module *mod, struct load_info *info)
{
	return 0;
}

static void free_module_elf(struct module *mod)
{
}
#endif /* CONFIG_LIVEPATCH */

void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	mod_sysfs_teardown(mod);

	/* We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed. */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	if (is_livepatch_module(mod))
		free_module_elf(mod);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);

	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);

	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);

	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
	synchronize_sched();
	mutex_unlock(&module_mutex);

	/* This may be empty, but that's OK */
	disable_ro_nx(&mod->init_layout);
	module_arch_freeing_init(mod);
	module_memfree(mod->init_layout.base_rw);
	module_memfree_exec(mod->init_layout.base_rx);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes; relies on the preceding sync_rcu(). */
	lockdep_free_key_range(mod->core_layout.base_rw, mod->core_layout.size_rw);
	lockdep_free_key_range(mod->core_layout.base_rx, mod->core_layout.size_rx);

	/* Finally, free the core (containing the module structure) */
	disable_ro_nx(&mod->core_layout);
	module_memfree_exec(mod->core_layout.base_rx);
	module_memfree(mod->core_layout.base_rw);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}

void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

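/*
 * Illustrative use (a sketch, not code from this file): callers normally go
 * through the symbol_get()/symbol_put() wrappers from <linux/module.h>,
 * which pin the owning module for as long as the symbol is in use:
 *
 *	int (*helper)(void);
 *
 *	helper = symbol_get(some_exported_fn);
 *	if (helper) {
 *		helper();
 *		symbol_put(some_exported_fn);
 *	}
 *
 * "some_exported_fn" is a placeholder for any EXPORT_SYMBOL'd function.
 */
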
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;
#ifdef CONFIG_GRKERNSEC_MODHARDEN
	int is_fs_load = 0;
	int register_filesystem_found = 0;
	char *p;

	p = strstr(mod->args, "grsec_modharden_fs");
	if (p) {
		char *endptr = p + sizeof("grsec_modharden_fs") - 1;

		/* copy \0 as well */
		memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
		is_fs_load = 1;
	}
#endif

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

#ifdef CONFIG_GRKERNSEC_MODHARDEN
		/* it's a real shame this will never get ripped and copied
		   upstream! ;(
		*/
		if (is_fs_load && !strcmp(name, "register_filesystem"))
			register_filesystem_found = 1;
#endif

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_LIVEPATCH:
			/* Livepatch symbols are resolved by livepatch */
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				pax_open_kernel();
				sym[i].st_value = ksym->value;
				pax_close_kernel();
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			pr_warn("%s: Unknown symbol %s (err %li)\n",
				mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			pax_open_kernel();
			sym[i].st_value += secbase;
			pax_close_kernel();
			break;
		}
	}

#ifdef CONFIG_GRKERNSEC_MODHARDEN
	if (is_fs_load && !register_filesystem_found) {
		printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
		ret = -EPERM;
	}
#endif

	return ret;
}

static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		/* Livepatch relocation sections are applied by livepatch */
		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}

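/*
 * Worked example (numbers chosen purely for illustration): with *size ==
 * 0x1234, no arch prepend, sh_addralign == 16 and sh_size == 0x40, the
 * section is placed at ALIGN(0x1234, 16) == 0x1240 and *size becomes 0x1280
 * ready for the next caller.
 */
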
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;

			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
				s->sh_entsize = get_offset(mod, &mod->core_layout.size_rw, s, i);
			else
				s->sh_entsize = get_offset(mod, &mod->core_layout.size_rx, s, i);
			pr_debug("\t%s\n", sname);
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;

			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
				s->sh_entsize = get_offset(mod, &mod->init_layout.size_rw, s, i);
			else
				s->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, s, i);
			s->sh_entsize |= INIT_OFFSET_MASK;
			pr_debug("\t%s\n", sname);
		}
	}
}

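/*
 * Reading aid (descriptive only, no functional content): the masks[] passes
 * above order sections as executable code first, then read-only data, then
 * ro-after-init data, then writable data, then arch-"small" sections. Each
 * pass runs once for the core image and once for .init*, and in this tree
 * every placed section additionally lands in either the rx or the rw
 * sub-layout, as chosen by the SHF_WRITE test in the loops.
 */
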
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}

/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

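/*
 * Example .modinfo contents (illustrative; the strings are emitted at build
 * time by the MODULE_LICENSE()/MODULE_AUTHOR()/etc. macros, not by this
 * file). The section is a run of NUL-terminated "tag=value" strings:
 *
 *	"license=GPL\0author=Jane Doe\0vermagic=4.9.0 SMP mod_unload \0"
 *
 * so get_modinfo(info, "license") would return a pointer to "GPL".
 */
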
static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}

static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}

#ifdef CONFIG_KALLSYMS

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}

static int is_exported(const char *name, unsigned long value,
		       const struct module *mod)
{
	const struct kernel_symbol *ks;

	if (!mod)
		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
	else
		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);

	return ks != NULL && ks->value == value;
}

/* As per nm */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
		return 'a';
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug")) {
		return 'n';
	}
	return '?';
}

static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum, unsigned int pcpundx)
{
	const Elf_Shdr *sec;

	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

#ifdef CONFIG_KALLSYMS_ALL
	if (src->st_shndx == pcpundx)
		return true;
#endif

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}

/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
 */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size = 0;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab. */
	for (ndst = i = 0; i < nsrc; i++) {
		if (i == 0 || is_livepatch_module(mod) ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
				   info->index.pcpu)) {
			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
			ndst++;
		}
	}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_layout.size_rx, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_layout.size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_layout.size_rx += strtab_size;
	mod->core_layout.size_rx = debug_align(mod->core_layout.size_rx);

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_layout.size_rx, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);

	/* We'll tack temporary mod_kallsyms on the end. */
	mod->init_layout.size_rx = ALIGN(mod->init_layout.size_rx,
					 __alignof__(struct mod_kallsyms));
	info->mod_kallsyms_init_off = mod->init_layout.size_rx;
	mod->init_layout.size_rx += sizeof(struct mod_kallsyms);
	mod->init_layout.size_rx = debug_align(mod->init_layout.size_rx);
}

/*
 * We use the full symtab and strtab which layout_symtab arranged to
 * be appended to the init section.  Later we switch to the cut-down
 * core-only ones.
 */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	/* Set up to point into init section. */
	mod->kallsyms = mod->init_layout.base_rx + info->mod_kallsyms_init_off;

	pax_open_kernel();

	mod->kallsyms->symtab = (void *)symsec->sh_addr;
	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->kallsyms->num_symtab; i++)
		mod->kallsyms->symtab[i].st_info
			= elf_type(&mod->kallsyms->symtab[i], info);

	/* Now populate the cut down core kallsyms for after init. */
	mod->core_kallsyms.symtab = dst = mod->core_layout.base_rx + info->symoffs;
	mod->core_kallsyms.strtab = s = mod->core_layout.base_rx + info->stroffs;
	src = mod->kallsyms->symtab;
	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
		if (i == 0 || is_livepatch_module(mod) ||
		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
				   info->index.pcpu)) {
			dst[ndst] = src[i];
			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
				     KSYM_NAME_LEN) + 1;
		}
	}
	mod->core_kallsyms.num_symtab = ndst;

	pax_close_kernel();
}

#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */

static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		pr_err("dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}

void *__weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

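/*
 * Note (general background, not specific to this tree): many architectures
 * override this weak default so that module memory lands within
 * direct-branch range of the kernel image (e.g. a dedicated modules VA
 * area); vmalloc_exec() is only the generic fallback.
 */
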
#ifdef CONFIG_DEBUG_KMEMLEAK
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		/* Scan all writable sections that are not executable */
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif

#ifdef CONFIG_MODULE_SIG
static int module_sig_check(struct load_info *info, int flags)
{
	int err = -ENOKEY;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const void *mod = info->hdr;

	/*
	 * Require flags == 0, as a module with version information
	 * removed is no longer the module that was signed
	 */
	if (flags == 0 &&
	    info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, &info->len);
	}

	if (!err) {
		info->sig_ok = true;
		return 0;
	}

	/* Not having a signature is only an error if we're strict. */
	if (err == -ENOKEY && !sig_enforce)
		err = 0;

	return err;
}
#else /* !CONFIG_MODULE_SIG */
static int module_sig_check(struct load_info *info, int flags)
{
	return 0;
}
#endif /* !CONFIG_MODULE_SIG */

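/*
 * Layout reminder (informational): a signed .ko is the plain ELF image,
 * followed by the signature blob, a struct module_signature, and finally
 * the MODULE_SIG_STRING marker ("~Module signature appended~\n"), which is
 * why the check above only needs to inspect the tail of the file before
 * handing the rest to mod_verify_sig().
 */
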
/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
static int elf_header_check(struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || info->hdr->e_type != ET_REL
	    || !elf_check_arch(info->hdr)
	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff))
		return -ENOEXEC;

	return 0;
}

#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
	do {
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)
			return -EFAULT;
		cond_resched();
		dst += n;
		usrc += n;
		len -= n;
	} while (len);
	return 0;
}

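/*
 * Copying in 16-page chunks with a cond_resched() between them keeps a very
 * large module image from hogging the CPU for the duration of one huge
 * copy_from_user(); the chunk size is a latency/overhead trade-off, not a
 * correctness requirement.
 */
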
#ifdef CONFIG_LIVEPATCH
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
	if (get_modinfo(info, "livepatch")) {
		mod->klp = true;
		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
	}

	return 0;
}
#else /* !CONFIG_LIVEPATCH */
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
	if (get_modinfo(info, "livepatch")) {
		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
		       mod->name);
		return -ENOEXEC;
	}

	return 0;
}
#endif /* CONFIG_LIVEPATCH */

/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
				  struct load_info *info)
{
	int err;

	info->len = len;
	if (info->len < sizeof(*(info->hdr)))
		return -ENOEXEC;

	err = security_kernel_read_file(NULL, READING_MODULE);
	if (err)
		return err;

	/* Suck in entire file: we'll want most of it. */
	info->hdr = __vmalloc(info->len,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
	if (!info->hdr)
		return -ENOMEM;

	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
		vfree(info->hdr);
		return -EFAULT;
	}

	return 0;
}

static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}

static int rewrite_section_headers(struct load_info *info, int flags)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			pr_err("Module len %lu truncated\n", info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
		info->index.vers = 0; /* Pretend no __versions section! */
	else
		info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}

/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info, int flags)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info, flags);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		pr_warn("No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
		/*
		 * avoid potentially printing gibberish on attempted load
		 * of a module randomized with a different seed
		 */
		pr_warn("module has no symbols (stripped?)\n");
#else
		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
#endif
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}

static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	const char *license = get_modinfo(info, "license");
	int err;

#if defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR) || defined(CONFIG_PAX_RAP)
	if (!license || !license_is_gpl_compatible(license)) {
		pr_err("%s: module is not compatible with the KERNEXEC 'or' method and RAP\n", mod->name);
		return -ENOEXEC;
	}
#endif

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it. */
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		pr_err("%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	if (!get_modinfo(info, "intree")) {
		if (!test_taint(TAINT_OOT_MODULE))
			pr_warn("%s: loading out-of-tree module taints kernel.\n",
				mod->name);
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
	}

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	err = check_modinfo_livepatch(mod, info);
	if (err)
		return err;

	/* Set up license info based on the info section */
	set_license(mod, license);

	return 0;
}

static int find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info,
					    "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
	if (!mod->ctors)
		mod->ctors = section_objs(info, ".init_array",
				sizeof(*mod->ctors), &mod->num_ctors);
	else if (find_sec(info, ".init_array")) {
		/*
		 * This shouldn't happen with same compiler and binutils
		 * building all parts of the module.
		 */
		pr_warn("%s: has both .ctors and .init_array.\n",
			mod->name);
		return -EINVAL;
	}
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					sizeof(*mod->jump_entries),
					&mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	mod->trace_enums = section_objs(info, "_ftrace_enum_map",
					sizeof(*mod->trace_enums),
					&mod->num_trace_enums);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable), &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);

	return 0;
}

static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc(mod->core_layout.size_rw);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_layout.size_rw);
	mod->core_layout.base_rw = ptr;

	if (mod->init_layout.size_rw) {
		ptr = module_alloc(mod->init_layout.size_rw);
		/*
		 * The pointer to this block is stored in the module structure
		 * which is inside the block. This block doesn't need to be
		 * scanned as it contains data and code that will be freed
		 * after the module is initialized.
		 */
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree(mod->core_layout.base_rw);
			return -ENOMEM;
		}
		memset(ptr, 0, mod->init_layout.size_rw);
		mod->init_layout.base_rw = ptr;
	} else
		mod->init_layout.base_rw = NULL;

	ptr = module_alloc_exec(mod->core_layout.size_rx);
	kmemleak_not_leak(ptr);
	if (!ptr) {
		if (mod->init_layout.base_rw)
			module_memfree(mod->init_layout.base_rw);
		module_memfree(mod->core_layout.base_rw);
		return -ENOMEM;
	}

	pax_open_kernel();
	memset(ptr, 0, mod->core_layout.size_rx);
	pax_close_kernel();
	mod->core_layout.base_rx = ptr;

	if (mod->init_layout.size_rx) {
		ptr = module_alloc_exec(mod->init_layout.size_rx);
		kmemleak_ignore(ptr);
		if (!ptr) {
			module_memfree_exec(mod->core_layout.base_rx);
			if (mod->init_layout.base_rw)
				module_memfree(mod->init_layout.base_rw);
			module_memfree(mod->core_layout.base_rw);
			return -ENOMEM;
		}

		pax_open_kernel();
		memset(ptr, 0, mod->init_layout.size_rx);
		pax_close_kernel();
		mod->init_layout.base_rx = ptr;
	} else
		mod->init_layout.base_rx = NULL;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK) {
			if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
				dest = mod->init_layout.base_rw
					+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
			else
				dest = mod->init_layout.base_rx
					+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		} else {
			if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
				dest = mod->core_layout.base_rw + shdr->sh_entsize;
			else
				dest = mod->core_layout.base_rx + shdr->sh_entsize;
		}

		if (shdr->sh_type != SHT_NOBITS) {
#ifdef CONFIG_PAX_KERNEXEC
#ifdef CONFIG_X86_64
			if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
				set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
#endif
			if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
				pax_open_kernel();
				memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
				pax_close_kernel();
			} else
#endif
				memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		}
		/* Update sh_addr to point to copy in image. */
#ifdef CONFIG_PAX_KERNEXEC
		if (shdr->sh_flags & SHF_EXECINSTR)
			shdr->sh_addr = ktva_ktla((unsigned long)dest);
		else
#endif
			shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
	}

	return 0;
}

static int check_module_license_and_versions(struct module *mod)
{
	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);

	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	/* lve claims to be GPL but upstream won't provide source */
	if (strcmp(mod->name, "lve") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);

	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
		pr_warn("%s: module license taints kernel.\n", mod->name);

#ifdef CONFIG_MODVERSIONS
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}

static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->init_layout.base_rx)
		flush_icache_range((unsigned long)mod->init_layout.base_rx,
				   (unsigned long)mod->init_layout.base_rx
				   + mod->init_layout.size_rx);
	flush_icache_range((unsigned long)mod->core_layout.base_rx,
			   (unsigned long)mod->core_layout.base_rx + mod->core_layout.size_rx);

	set_fs(old_fs);
}

int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}

/* module_blacklist is a comma-separated list of module names */
static char *module_blacklist;
static bool blacklisted(char *module_name)
{
	const char *p;
	size_t len;

	if (!module_blacklist)
		return false;

	for (p = module_blacklist; *p; p += len) {
		len = strcspn(p, ",");
		if (strlen(module_name) == len && !memcmp(module_name, p, len))
			return true;
		if (p[len] == ',')
			len++;
	}
	return false;
}
core_param(module_blacklist, module_blacklist, charp, 0400);

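/*
 * Usage example (kernel command line; the module names are illustrative):
 *
 *	module_blacklist=floppy,pcspkr
 *
 * makes layout_and_allocate() below reject loads of those two names with
 * -EPERM while leaving every other module loadable.
 */
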
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	/* Module within temporary copy. */
	struct module *mod;
	unsigned int ndx;
	int err;

	mod = setup_load_info(info, flags);
	if (IS_ERR(mod))
		return mod;

	if (blacklisted(mod->name))
		return ERR_PTR(-EPERM);

	err = check_modinfo(mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/*
	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
	 * layout_sections() can put it in the right place.
	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
	 */
	ndx = find_sec(info, ".data..ro_after_init");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	module_memfree_exec(mod->init_layout.base_rx);
	module_memfree_exec(mod->core_layout.base_rx);
	module_memfree(mod->init_layout.base_rw);
	module_memfree(mod->core_layout.base_rw);
}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	pax_open_kernel();
	sort_extable(mod->extable, mod->extable + mod->num_exentries);
	pax_close_kernel();

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

/* Is this module of this name done loading?  No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE
		|| mod->state == MODULE_STATE_GOING;
	mutex_unlock(&module_mutex);

	return ret;
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* For freeing module_init on success, in case kallsyms traversing */
struct mod_initfree {
	struct rcu_head rcu;
	void *module_init_rw;
	void *module_init_rx;
};

static void do_free_init(struct rcu_head *head)
{
	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
	module_memfree(m->module_init_rw);
	module_memfree_exec(m->module_init_rx);
	kfree(m);
}

/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init_rx = mod->init_layout.base_rx;
	freeinit->module_init_rw = mod->init_layout.base_rw;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
		async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	module_enable_ro(mod, true);
	mod_tree_remove_init(mod);
	disable_ro_nx(&mod->init_layout);
	module_arch_freeing_init(mod);
	mod->init_layout.base_rx = NULL;
	mod->init_layout.base_rw = NULL;
	mod->init_layout.size_rx = 0;
	mod->init_layout.size_rw = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_sched(), but we don't want to slow down the success
	 * path, so use actual RCU here.
	 */
	call_rcu_sched(&freeinit->rcu, do_free_init);
	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_sched();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}

static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}
/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state == MODULE_STATE_COMING
		    || old->state == MODULE_STATE_UNFORMED) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	mod_update_bounds(mod);
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}
static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);
	module_enable_ro(mod, false);
	module_enable_nx(mod);

	/*
	 * Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us.
	 */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}
static int prepare_coming_module(struct module *mod)
{
	int err;

	ftrace_module_enable(mod);
	err = klp_module_coming(mod);
	if (err)
		return err;

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
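/*
 * Usage sketch (assuming a hypothetical module "foo"): both parameters below
 * are consumed by this callback rather than by the module itself:
 *
 *	modprobe foo async_probe dyndbg=+pmf
 *
 * "async_probe" only sets mod->async_probe_requested; "dyndbg" is the magic
 * argument recognised by ddebug_dyndbg_module_param_cb().  Any parameter that
 * no parse_args() handler claims is warned about and otherwise ignored.
 */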
/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err;
	char *after_dashes;

	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	err = elf_header_check(info);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/*
	 * Now we've got everything in the final locations, we can
	 * find optional sections.
	 */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_unload;
	}

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

#ifdef CONFIG_GRKERNSEC_MODHARDEN
	{
		char *p, *p2;

		if (strstr(mod->args, "grsec_modharden_netdev")) {
			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
			err = -EPERM;
			goto free_modinfo;
		} else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
			p += sizeof("grsec_modharden_normal") - 1;
			p2 = strstr(p, "_");
			if (p2) {
				*p2 = '\0';
				printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
				*p2 = '_';
			}
			err = -EPERM;
			goto free_modinfo;
		}
	}
#endif

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	dynamic_debug_setup(info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	err = prepare_coming_module(mod);
	if (err)
		goto bug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);
	return do_init_module(mod);

sysfs_cleanup:
	mod_sysfs_teardown(mod);
coming_cleanup:
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
bug_cleanup:
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

	/* we can't deallocate the module until we clear memory protection */
	module_disable_ro(mod);
	module_disable_nx(mod);

ddebug_cleanup:
	dynamic_debug_remove(info->debug);
	synchronize_sched();
	module_arch_cleanup(mod);
free_modinfo:
	free_modinfo(mod);
	kfree(mod->args);
free_unload:
	module_unload_free(mod);
unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_sched();
	mutex_unlock(&module_mutex);
free_module:
	/*
	 * Ftrace needs to clean up what it initialized.
	 * This does nothing if ftrace_module_init() wasn't called,
	 * but it must be called outside of module_mutex.
	 */
	ftrace_release_mod(mod);
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->core_layout.base_rw, mod->core_layout.size_rw);
	lockdep_free_key_range(mod->core_layout.base_rx, mod->core_layout.size_rx);

	module_deallocate(mod, info);
free_copy:
	free_copy(info);
	return err;
}

SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}
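/*
 * Userspace sketch (illustrative, not kernel code): init_module(2) has no
 * glibc wrapper, so callers typically read a complete ELF image into memory
 * and invoke the raw syscall:
 *
 *	int ret = syscall(SYS_init_module, image, image_len, "param=1");
 *
 * The kernel copies the whole image via copy_module_from_user() before any
 * validation, so the buffer can be freed as soon as the call returns.
 */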
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	struct load_info info = { };
	loff_t size;
	void *hdr;
	int err;

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
				       READING_MODULE);
	if (err)
		return err;
	info.hdr = hdr;
	info.len = size;

	return load_module(&info, uargs, flags);
}
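/*
 * Userspace sketch (illustrative, not kernel code): the fd-based variant lets
 * the kernel read the file itself, so security hooks see the originating file
 * rather than an anonymous buffer:
 *
 *	int fd = open("/lib/modules/.../foo.ko", O_RDONLY | O_CLOEXEC);
 *	syscall(SYS_finit_module, fd, "", 0);
 */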
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}
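/*
 * Examples of names rejected above, matching the "axtd" check: "$a", "$t" and
 * "$d" (ARM mapping symbols for ARM code, Thumb code and data), "$x" (the
 * arm64 equivalent), suffixed variants such as "$d.3", and compiler-generated
 * local ".L" labels.
 */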
static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}

static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worst, next value is at end of module */
	if (within_module_rx(addr, &mod->init_layout))
		nextval = (unsigned long)mod->init_layout.base_rx + mod->init_layout.size_rx;
	else if (within_module_rw(addr, &mod->init_layout))
		nextval = (unsigned long)mod->init_layout.base_rw + mod->init_layout.size_rw;
	else if (within_module_rx(addr, &mod->core_layout))
		nextval = (unsigned long)mod->core_layout.base_rx + mod->core_layout.size_rx;
	else if (within_module_rw(addr, &mod->core_layout))
		nextval = (unsigned long)mod->core_layout.base_rw + mod->core_layout.size_rw;
	else
		return NULL;

	/*
	 * Scan for closest preceding symbol, and next symbol. (ELF
	 * starts real symbols at 1).
	 */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/*
		 * We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim.
		 */
		if (*symname(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(symname(kallsyms, i)))
			continue;

		if (kallsyms->symtab[i].st_value <= addr
		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
			best = i;
		if (kallsyms->symtab[i].st_value > addr
		    && kallsyms->symtab[i].st_value < nextval)
			nextval = kallsyms->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - kallsyms->symtab[best].st_value;
	if (offset)
		*offset = addr - kallsyms->symtab[best].st_value;
	return symname(kallsyms, best);
}
/*
 * For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption.
 */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  char *namebuf)
{
	const char *ret = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (mod) {
		if (modname)
			*modname = mod->name;
		ret = get_ksymbol(mod, addr, size, offset);
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}
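/*
 * Indirect usage sketch: this is what kallsyms_lookup() falls back to for
 * module addresses, so it ultimately backs symbolic stack traces and printk's
 * %pS format, e.g.:
 *
 *	pr_info("called from %pS\n", __builtin_return_address(0));
 */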
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			*value = kallsyms->symtab[symnum].st_value;
			*type = kallsyms->symtab[symnum].st_info;
			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++)
		if (strcmp(name, symname(kallsyms, i)) == 0 &&
		    kallsyms->symtab[i].st_info != 'U')
			return kallsyms->symtab[i].st_value;
	return 0;
}

/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = mod_find_symname(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
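/*
 * Usage sketch (module and symbol names are hypothetical): the "module:name"
 * form restricts the search to a single module:
 *
 *	unsigned long addr;
 *
 *	addr = module_kallsyms_lookup_name("usbcore:usb_register_driver");
 *	if (!addr)
 *		addr = module_kallsyms_lookup_name("usb_register_driver");
 */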
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {
			ret = fn(data, symname(kallsyms, i),
				 mod, kallsyms->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
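/*
 * Usage sketch (hypothetical callback): the walk visits every symbol of every
 * formed module and stops at the first non-zero return; module_mutex must be
 * held by the caller:
 *
 *	static int count_syms(void *data, const char *name,
 *			      struct module *mod, unsigned long addr)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	module_kallsyms_on_each_symbol(count_syms, &n);
 */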
#endif /* CONFIG_KALLSYMS */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}
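/*
 * Example output (illustrative): a proprietary, out-of-tree module in the
 * middle of being unloaded is rendered as "(PO-)".  The caller's buffer must
 * have room for '(', the taint characters, a state character, ')' and the
 * terminating NUL.
 */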
#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;
	seq_printf(m, "%s %u",
		   mod->name, mod->init_layout.size_rx + mod->init_layout.size_rw + mod->core_layout.size_rx + mod->core_layout.size_rw);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	seq_printf(m, " 0x%pK 0x%pK", mod->core_layout.base_rx, mod->core_layout.base_rw);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}
/*
 * Format: modulename size refcount deps address
 * Where refcount is a number or -, and deps is a comma-separated list
 * of depends or -.
 */
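/*
 * Illustrative /proc/modules line (field values are made up; the two
 * addresses are printed with %pK and so may read as zeroes for unprivileged
 * readers):
 *
 *	nfsd 301989 11 auth_rpcgss,lockd, Live 0xffffffffa03b0000 0xffffffffa03c8000
 */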
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

static int modules_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &modules_op);
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
#ifndef CONFIG_GRKERNSEC_HIDESYM
#ifdef CONFIG_GRKERNSEC_PROC_USER
	proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
#else
	proc_create("modules", 0, NULL, &proc_modules_operations);
#endif
#else
	proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
#endif
	return 0;
}
module_init(proc_modules_init);
#endif
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (mod->num_exentries == 0)
			continue;

		e = search_extable(mod->extable,
				   mod->extable + mod->num_exentries - 1,
				   addr);
		if (e)
			break;
	}
	preempt_enable();

	/*
	 * Now, if we found one, we are running inside it now, hence
	 * we cannot unload the module, hence no refcnt needed.
	 */
	return e;
}
/**
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/**
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

#ifdef CONFIG_X86_32
	unsigned long vaddr = ktla_ktva(addr);

	if (module_addr_min_rx <= vaddr && vaddr <= module_addr_max_rx)
		addr = vaddr;
#endif

	if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
	    (addr < module_addr_min_rw || addr > module_addr_max_rw))
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_address);
/**
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}
/**
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod;

#ifdef CONFIG_X86_32
	addr = ktla_ktva(addr);
#endif

	if (addr < module_addr_min_rx || addr > module_addr_max_rx)
		return NULL;

	mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within_module_rx(addr, &mod->init_layout)
		    && !within_module_rx(addr, &mod->core_layout))
			mod = NULL;
	}
	return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);
/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[8];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}
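/*
 * Example oops banner (illustrative module names):
 *
 *	Modules linked in: nls_utf8 isofs loop(O-) [last unloaded: floppy]
 */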
#ifdef CONFIG_MODVERSIONS
/*
 * Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module.
 */
__visible void module_layout(struct module *mod,
			     struct modversion_info *ver,
			     struct kernel_param *kp,
			     struct kernel_symbol *ks,
			     struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif