    struct mm_struct *mm, *active_mm;	/* the process's address space */
#ifdef CONFIG_COMPAT_BRK
    unsigned brk_randomized:1;
#endif
    /* per-thread vma caching */
    u32 vmacache_seqnum;
    struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
    struct task_rss_stat rss_stat;
#endif
    /* task state */
    int exit_state;
    int exit_code, exit_signal;	/* signal delivered when the process exits */
    int pdeath_signal;		/* The signal sent when the parent dies */
    unsigned int jobctl;	/* JOBCTL_*, siglock protected */
    /* Used for emulating ABI behavior of previous Linux versions */
    unsigned int personality;
    unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
    unsigned in_iowait:1;
    /* Revert to default priority/policy when forking */
    unsigned sched_reset_on_fork:1;
    unsigned sched_contributes_to_load:1;
    pid_t pid;		/* the task's PID */
    pid_t tgid;		/* thread group ID (PID of the thread group leader) */

    /* canary used to detect kernel stack overruns */
#ifdef CONFIG_CC_STACKPROTECTOR
    /* Canary value for the -fstack-protector gcc feature */
    unsigned long stack_canary;
#endif
    /*
     * pointers to (original) parent process, youngest child, younger sibling,
     * older sibling, respectively.  (p->father can be replaced with
     * p->real_parent->pid)
     */
    struct task_struct __rcu *real_parent;	/* real parent process */
    struct task_struct __rcu *parent;	/* recipient of SIGCHLD, wait4() reports */
    /*
     * children/sibling forms the list of my natural children
     * (a traversal sketch follows this struct listing)
     */
    struct list_head children;	/* list of my children */
    struct list_head sibling;	/* linkage in my parent's children list */
    /* thread group leader */
    struct task_struct *group_leader;	/* threadgroup leader */
    /*
     * ptraced is the list of tasks this task is using ptrace on.
     * This includes both natural children and PTRACE_ATTACH targets.
     * p->ptrace_entry is p's link on the p->parent->ptraced list.
     */
    /* lists used by the ptrace() debugging interface */
    struct list_head ptraced;
    struct list_head ptrace_entry;
    /* PID/PID hash table linkage. */
    struct pid_link pids[PIDTYPE_MAX];
    struct list_head thread_group;
    struct list_head thread_node;

    /* used by do_fork() */
    struct completion *vfork_done;	/* for vfork() */
    int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
    int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */

    /*
     * CPU time accounting:
     * utime - time spent executing in user mode
     * stime - time spent executing in kernel mode
     */
    cputime_t utime, stime, utimescaled, stimescaled;
    cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
    seqlock_t vtime_seqlock;
    unsigned long long vtime_snap;
    enum {
        VTIME_SLEEPING = 0,
        VTIME_USER,
        VTIME_SYS,
    } vtime_snap_whence;
#endif
    unsigned long nvcsw, nivcsw;	/* context switch counts */
    u64 start_time;		/* monotonic time in nsec */
    u64 real_start_time;	/* boot based time in nsec */
    /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
    unsigned long min_flt, maj_flt;
    /* Thread group tracking */
    u32 parent_exec_id;
    u32 self_exec_id;
    /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
     * mempolicy */
    spinlock_t alloc_lock;
    /* Protection of the PI data structures: */
    raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
    /* PI waiters blocked on a rt_mutex held by this task */
    struct rb_root pi_waiters;
    struct rb_node *pi_waiters_leftmost;
    /* Deadlock detection and priority inheritance handling */
    struct rt_mutex_waiter *pi_blocked_on;
#endif
    /*
     * numa_faults is an array split into four regions:
     * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
     * in this precise order.
     *
     * faults_memory: Exponential decaying average of faults on a per-node
     * basis. Scheduling placement decisions are made based on these
     * counts. The values remain static for the duration of a PTE scan.
     * faults_cpu: Track the nodes the process was running on when a NUMA
     * hinting fault was incurred.
     * faults_memory_buffer and faults_cpu_buffer: Record faults per node
     * during the current scan window. When the scan completes, the counts
     * in faults_memory and faults_cpu decay and these values are copied.
     */
    unsigned long *numa_faults;
    unsigned long total_numa_faults;
    /*
     * numa_faults_locality tracks if faults recorded during the last
     * scan window were remote/local or failed to migrate. The task scan
     * period is adapted based on the locality of the faults with different
     * weights depending on whether they were shared or private faults
     */
    unsigned long numa_faults_locality[3];
    /*
     * cache last used pipe for splice
     */
    struct pipe_inode_info *splice_pipe;	/* cached pipe for splice() */
    struct page_frag task_frag;
#ifdef CONFIG_TASK_DELAY_ACCT
    struct task_delay_info *delays;	/* delay accounting */
#endif
#ifdef CONFIG_FAULT_INJECTION
    int make_it_fail;
#endif
    /*
     * when (nr_dirtied >= nr_dirtied_pause), it's time to call
     * balance_dirty_pages() for some dirty throttling pause
     */
    int nr_dirtied;
    int nr_dirtied_pause;
    unsigned long dirty_paused_when;	/* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP
    int latency_record_count;
    struct latency_record latency_record[LT_SAVECOUNT];
#endif
    /*
     * time slack values; these are used to round up poll() and
     * select() etc timeout values. These are in nanoseconds.
     */
    unsigned long timer_slack_ns;
    unsigned long default_timer_slack_ns;
#ifdef CONFIG_KASAN
    unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    /* Index of current stored address in ret_stack */
    int curr_ret_stack;
    /* Stack of return addresses for return function tracing */
    struct ftrace_ret_stack *ret_stack;
    /* time stamp for last schedule */
    unsigned long long ftrace_timestamp;
    /*
     * Number of functions that haven't been traced
     * because of depth overrun.
     */
    atomic_t trace_overrun;
    /* Pause for the tracing */
    atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
    /* state flags for use by tracers */
    unsigned long trace;
    /* bitmask and counter of trace recursion */
    unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
    struct memcg_oom_info {
        struct mem_cgroup *memcg;
        gfp_t gfp_mask;
        int order;
        unsigned int may_oom:1;
    } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
    struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
    unsigned int sequential_io;
    unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
    unsigned long task_state_change;
#endif
};
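Two short, hedged sketches tied to fields in the listing above (illustrative code, not taken from the kernel source being quoted). First, the children/sibling links: the parent's children field is the list head, and each child is linked in through its own sibling field, so walking a task's direct children looks like this (the list is protected by tasklist_lock):

    #include <linux/sched.h>
    #include <linux/list.h>
    #include <linux/printk.h>

    /* Print every direct child of @parent; take tasklist_lock for reading
     * while walking the children list. */
    static void print_children(struct task_struct *parent)
    {
        struct task_struct *child;

        read_lock(&tasklist_lock);
        list_for_each_entry(child, &parent->children, sibling)
            pr_info("child pid=%d comm=%s\n", child->pid, child->comm);
        read_unlock(&tasklist_lock);
    }

Second, timer_slack_ns is the per-task value that userspace can tune with prctl(): PR_SET_TIMERSLACK takes the slack in nanoseconds, and a value of 0 falls back to default_timer_slack_ns.

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        /* allow the kernel to coalesce our poll()/select() timeouts within 1 ms */
        if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0) != 0)
            perror("PR_SET_TIMERSLACK");

        printf("timer slack is now %ld ns\n",
               (long)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
        return 0;
    }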
    /*
     * It is the responsibility of the pick_next_task() method that will
     * return the next task to call put_prev_task() on the @prev task or
     * something equivalent.
     *
     * May return RETRY_TASK when it finds a higher prio class has runnable
     * tasks.
     */
    /* select the next task that should run */
    struct task_struct * (*pick_next_task) (struct rq *rq,
                                            struct task_struct *prev);
    /* put the previous task back on the runqueue */
    void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
    /* select a suitable CPU for the task */
    int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
    /* migrate the task to another CPU */
    void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
    /*
     * The switched_from() call is allowed to drop rq->lock, therefore we
     * cannot assume the switched_from/switched_to pair is serialized by
     * rq->lock. They are however serialized by p->pi_lock.
     */
    /* called when the task switches to/from this scheduling class */
    void (*switched_from) (struct rq *this_rq, struct task_struct *task);
    void (*switched_to)   (struct rq *this_rq, struct task_struct *task);
    /* called when the task's priority changes */
    void (*prio_changed)  (struct rq *this_rq, struct task_struct *task,
                           int oldprio);
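These hooks are driven by the core scheduler. Below is a hedged, simplified sketch of the two call sites, modeled on pick_next_task() and check_class_changed() in kernel/sched/core.c (it assumes the kernel-internal kernel/sched/sched.h context; the fair-class fast path and deadline-specific checks are omitted).

    /* Sketch 1: __schedule() asks each scheduling class, from highest to
     * lowest priority, for a runnable task. The chosen class is expected to
     * have handled put_prev_task() for @prev; RETRY_TASK means a higher
     * priority class became runnable, so the walk restarts. */
    static struct task_struct *pick_next_task_sketch(struct rq *rq,
                                                     struct task_struct *prev)
    {
        const struct sched_class *class;
        struct task_struct *p;

    again:
        for_each_class(class) {
            p = class->pick_next_task(rq, prev);
            if (p) {
                if (unlikely(p == RETRY_TASK))
                    goto again;
                return p;
            }
        }
        BUG();	/* the idle class always returns a task */
    }

    /* Sketch 2: after a task changes scheduling class or priority, the old
     * and new classes get a chance to fix up their bookkeeping through
     * switched_from()/switched_to()/prio_changed(). */
    static void check_class_changed_sketch(struct rq *rq, struct task_struct *p,
                                           const struct sched_class *prev_class,
                                           int oldprio)
    {
        if (prev_class != p->sched_class) {
            if (prev_class->switched_from)
                prev_class->switched_from(rq, p);
            p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio) {
            p->sched_class->prio_changed(rq, p, oldprio);
        }
    }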
    /*
     * 'curr' points to currently running entity on this cfs_rq.
     * It is set to NULL otherwise (i.e when none are currently running).
     */
    struct sched_entity *curr, *next, *last, *skip;
#ifdef CONFIG_SMP
    /*
     * CFS Load tracking
     * Under CFS, load is tracked on a per-entity basis and aggregated up.
     * This allows for the description of both thread and group usage (in
     * the FAIR_GROUP_SCHED case).
     */
    unsigned long runnable_load_avg, blocked_load_avg;
    atomic64_t decay_counter;
    u64 last_decay;
    atomic_long_t removed_load;
#ifdef CONFIG_FAIR_GROUP_SCHED
    /* Required to track per-cpu representation of a task_group */
    u32 tg_runnable_contrib;
    unsigned long tg_load_contrib;
    /*
     * h_load = weight * f(tg)
     *
     * Where f(tg) is the recursive weight fraction assigned to
     * this group.
     */
    unsigned long h_load;
    u64 last_h_load_update;
    struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
    struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
    /*
     * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
     * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
     * (like users, containers etc.)
     *
     * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
     * list is used during load balance.
     */
    int on_list;
    struct list_head leaf_cfs_rq_list;
    /* the task group that owns this CFS runqueue */
    struct task_group *tg;	/* group that "owns" this runqueue */
#ifdef CONFIG_CFS_BANDWIDTH
    int runtime_enabled;
    u64 runtime_expires;
    s64 runtime_remaining;
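To make the h_load comment earlier in this listing concrete, here is a toy, hedged illustration with invented numbers: if a group entity contributes 1024 out of a root cfs_rq load of 4096, then f(tg) = 1/4, and a task of weight 1024 inside that group carries a hierarchical load of 256.

    #include <stdio.h>

    int main(void)
    {
        unsigned long root_load   = 4096;  /* total load at the root cfs_rq (invented) */
        unsigned long group_load  = 1024;  /* load contributed by the group entity     */
        unsigned long task_weight = 1024;  /* weight of one task inside the group      */

        /* h_load = weight * f(tg), with f(tg) = group_load / root_load */
        unsigned long h_load = task_weight * group_load / root_load;

        printf("h_load = %lu\n", h_load);  /* prints 256 */
        return 0;
    }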
    struct sched_rt_entity *back;	/* temporary used by dequeue_rt_stack() */
#ifdef CONFIG_RT_GROUP_SCHED
    struct sched_rt_entity *parent;	/* the parent (higher-level) scheduling entity */
    /* rq on which this entity is (to be) queued: */
    struct rt_rq *rt_rq;	/* the RT runqueue this entity is queued on */
    /* rq "owned" by this entity/group: */
    struct rt_rq *my_q;		/* the RT runqueue holding this entity's child entities */
#endif
};
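A minimal sketch (assuming CONFIG_RT_GROUP_SCHED) of how these links form a hierarchy: starting from a task's rt entity, following parent visits the enclosing group entities, which is the shape of the for_each_sched_rt_entity() walk in kernel/sched/rt.c. rt_entity_depth() below is a hypothetical helper, not a kernel function.

    #include <linux/sched.h>

    /* Count how many levels lie between this entity and the root rt_rq.
     * rt_se->rt_rq is the runqueue the entity is queued on; a non-NULL
     * my_q marks a group entity and points at its child runqueue. */
    static int rt_entity_depth(struct sched_rt_entity *rt_se)
    {
        int depth = 0;

        for (; rt_se; rt_se = rt_se->parent)
            depth++;

        return depth;
    }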