Relay's predecessor was RelayFS, introduced as a new filesystem for Linux. The first version of the RelayFS code was written in March 2003, and on July 14 of that year the first version targeting the 2.6 kernel became available for download. After extensive use and refinement, RelayFS was merged into the mainline kernel in September 2005 (2.6.14); it was also back-ported to the 2.4 kernel. In February 2006, as of 2.6.17, RelayFS stopped existing as a separate filesystem and became part of the kernel proper: its source moved from the fs/ directory to kernel/relay.c, and the name changed from RelayFS to Relay.
[root@localhost ~]# service cgconfig status
Stopped
[root@localhost ~]# service cgconfig start
Starting cgconfig service: [ OK ]
[root@localhost ~]# service cgconfig status
Running
[root@localhost /]# ls -l /dev/sda
brw-rw----. 1 root disk 8, 0 Sep 15 04:19 /dev/sda
7.1.4 Modify the cgrules.conf file
[root@localhost ~]# vi /etc/cgrules.conf
# /etc/cgrules.conf
#The format of this file is described in cgrules.conf(5)
#manual page.
#
# Example:
#<user> <controllers> <destination>
#@student cpu,memory usergroup/student/
#peter cpu test1/
#% memory test2/
*:/usr/local/mysql/bin/mysqld * mysql_g1
Note: each rule consists of three fields: the instance to be restricted (here, any user running /usr/local/mysql/bin/mysqld), the controllers being limited (such as cpu and memory; * means all of them), and the destination cgroup (mysql_g1).
7.2 Make the configuration take effect
[root@localhost ~]# /etc/init.d/cgconfig restart
Stopping cgconfig service: [ OK ]
Starting cgconfig service: [ OK ]
[root@localhost ~]# /etc/init.d/cgred restart
Stopping CGroup Rules Engine Daemon... [ OK ]
Starting CGroup Rules Engine Daemon: [ OK ]
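With both services restarted, cgred should place any newly started mysqld process into the mysql_g1 group. One quick check (assuming mysqld is running) is to read the process's cgroup membership from procfs; every mounted controller line in the output should end with the mysql_g1 path:

[root@localhost ~]# cat /proc/$(pidof mysqld)/cgroup

Note that cgred classifies processes as they are started, so a mysqld instance that was already running before the restart may need to be restarted (or moved with cgclassify) before it shows up in mysql_g1.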
void synchronize_rcu(void)
{
    struct rcu_synchronize rcu;

    init_completion(&rcu.completion);
    /* Will wake me after RCU finished */
    call_rcu(&rcu.head, wakeme_after_rcu);

    /* Wait for it */
    wait_for_completion(&rcu.completion);
}
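To see what synchronize_rcu() is typically used for, here is a minimal sketch of the classic read/update pattern (the struct, variable, and function names are hypothetical, not from the source): readers traverse the shared pointer inside rcu_read_lock()/rcu_read_unlock() and never block, while the updater publishes a new version with rcu_assign_pointer() and calls synchronize_rcu() before freeing the old one, guaranteeing that every reader which might still see the old version has finished.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
    int val;
};

static struct foo *gp;    /* RCU-protected global pointer (hypothetical) */

/* Reader side: cheap and non-blocking, may run concurrently with updates. */
int foo_read_val(void)
{
    struct foo *p;
    int v;

    rcu_read_lock();
    p = rcu_dereference(gp);
    v = p ? p->val : -1;
    rcu_read_unlock();
    return v;
}

/* Update side: publish the new version, then wait before freeing the old. */
void foo_update(struct foo *newp)
{
    struct foo *old = gp;

    rcu_assign_pointer(gp, newp);
    synchronize_rcu();    /* blocks until all pre-existing readers are done */
    kfree(old);
}

Concurrent updaters would additionally need a lock of their own; RCU only coordinates the readers with the updater here.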
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
    long cpu = (long)hcpu;

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        rcu_online_cpu(cpu);
        break;
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        rcu_offline_cpu(cpu);
        break;
    default:
        break;
    }
    return NOTIFY_OK;
}
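This notifier has to be registered at boot. In kernels of this vintage the wiring looked roughly like the following sketch (the exact body of rcu_init() is an approximation): the boot CPU is initialized by invoking the callback directly, and later hotplug events arrive through the notifier chain.

static struct notifier_block rcu_nb = {
    .notifier_call = rcu_cpu_notify,
};

void __init rcu_init(void)
{
    /* Set up RCU state for the boot CPU by calling the notifier directly,
     * then register it so later hotplug events are handled the same way.
     */
    rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                   (void *)(long)smp_processor_id());
    register_cpu_notifier(&rcu_nb);
}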
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
    /* This cpu has pending rcu entries and the grace period
     * for them has completed.
     */
    if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
        return 1;

    /* This cpu has no pending entries, but there are new entries */
    if (!rdp->curlist && rdp->nxtlist)
        return 1;

    /* This cpu has finished callbacks to invoke */
    if (rdp->donelist)
        return 1;

    /* The rcu core waits for a quiescent state from the cpu */
    if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
        return 1;

    /* nothing to do */
    return 0;
}
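For context, __rcu_pending() (via its rcu_pending() wrapper) is polled from the per-CPU timer tick, so each of the four checks above is a cheap way to decide whether the tick needs to kick RCU core processing at all. The call site looked roughly like this sketch (simplified; treat the exact placement inside update_process_times() as an assumption):

/* In the per-CPU timer interrupt path (simplified sketch). */
void update_process_times(int user_tick)
{
    int cpu = smp_processor_id();
    ...
    if (rcu_pending(cpu))
        rcu_check_callbacks(cpu, user_tick);    /* kicks RCU core processing */
    ...
}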
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                      struct rcu_data *rdp)
{
    if (rdp->quiescbatch != rcp->cur) {
        /* start new grace period: */
        rdp->qs_pending = 1;
        rdp->passed_quiesc = 0;
        rdp->quiescbatch = rcp->cur;
        return;
    }

    /* Grace period already completed for this cpu?
     * qs_pending is checked instead of the actual bitmap to avoid
     * cacheline trashing.
     */
    if (!rdp->qs_pending)
        return;

    /*
     * Was there a quiescent state since the beginning of the grace
     * period? If no, then exit and wait for the next call.
     */
    if (!rdp->passed_quiesc)
        return;
    rdp->qs_pending = 0;

    spin_lock(&rcp->lock);
    /*
     * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
     * during cpu startup. Ignore the quiescent state.
     */
    if (likely(rdp->quiescbatch == rcp->cur))
        cpu_quiet(rdp->cpu, rcp);
    spin_unlock(&rcp->lock);
}
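The passed_quiesc flag checked above is set from scheduler-side hooks whenever the CPU goes through a quiescent state such as a context switch. In classic RCU this is a one-line helper, roughly as sketched below (an approximation of that era's code):

/* Called from quiescent-state points such as context switches. */
static inline void rcu_qsctr_inc(int cpu)
{
    struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

    rdp->passed_quiesc = 1;    /* this CPU has passed a quiescent state */
}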
/* FD status is defined by the poller's status and by the speculative I/O list */
int fd_nbupdt = 0;             // number of updates in the list
unsigned int *fd_updt = NULL;  // FD updates list

/* info about one given fd */
struct fdtab {
    int (*iocb)(int fd);     /* I/O handler, returns FD_WAIT_* */
    void *owner;             /* the connection or listener associated with this fd, NULL if closed */
    unsigned int spec_p;     /* speculative polling: position in spec list+1. 0=not in list. */
    unsigned char spec_e;    /* speculative polling: read and write events status. 4 bits */
    unsigned char ev;        /* event seen in return of poll() : FD_POLL_* */
    unsigned char new:1;     /* 1 if this fd has just been created */
    unsigned char updated:1; /* 1 if this fd is already in the update list */
};
static inline void fd_ev_set(int fd, int dir)
{
    unsigned int i = ((unsigned int)fdtab[fd].spec_e) & (FD_EV_STATUS << dir);
    ...
    if (i & (FD_EV_ACTIVE << dir))
        return; /* already in desired state */
    fdtab[fd].spec_e |= (FD_EV_ACTIVE << dir);
    updt_fd(fd); /* need an update entry to change the state */
}
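Disabling an event direction presumably mirrors fd_ev_set(): clear the ACTIVE bit and queue an update entry so the poller can be resynchronized later. The following counterpart is only a sketch under that assumption; the real HAProxy helper may differ in name and details:

/* Hypothetical counterpart of fd_ev_set(): stop polling <fd> for <dir>. */
static inline void fd_ev_clr(int fd, int dir)
{
    unsigned int i = ((unsigned int)fdtab[fd].spec_e) & (FD_EV_STATUS << dir);

    if (!(i & (FD_EV_ACTIVE << dir)))
        return; /* already in desired state */
    fdtab[fd].spec_e &= ~(FD_EV_ACTIVE << dir);
    updt_fd(fd); /* need an update entry to change the state */
}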
if (fdtab[fd].iocb) {
    int new_updt, old_updt;

    /* Mark the events as speculative before processing
     * them so that if nothing can be done we don't need
     * to poll again.
     */
    if (fdtab[fd].ev & FD_POLL_IN)
        fd_ev_set(fd, DIR_RD);

    if (fdtab[fd].ev & FD_POLL_OUT)
        fd_ev_set(fd, DIR_WR);

    if (fdtab[fd].spec_p) {
        /* This fd was already scheduled for being called as a speculative I/O */
        continue;
    }

    /* Save number of updates to detect creation of new FDs. */
    old_updt = fd_nbupdt;
    fdtab[fd].iocb(fd);
void process_runnable_tasks(int *next)
{
    ...
    eb = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
    while (max_processed--) {
        ...
        t = eb32_entry(eb, struct task, rq);
        eb = eb32_next(eb);
        __task_unlink_rq(t);

        t->state |= TASK_RUNNING;

        /* This is an optimisation to help the processor's branch
         * predictor take this most common call.
         */
        t->calls++;
        if (likely(t->process == process_session))
            t = process_session(t);
        else
            t = t->process(t);
        ...
    }
}
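The likely() hint in the loop above is the usual __builtin_expect wrapper; defining it this way is standard practice in C projects of this kind (the exact definition in this codebase is an assumption):

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

It lets GCC lay out the direct process_session() call as the fall-through path, so both the branch predictor and the instruction cache favor the common case over the indirect t->process(t) call.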