Linux kernel traffic control (13)

Copyleft: this document is owned by yfydz and released under the GPL; you are free to copy and reprint it, provided the document is kept complete. Use for any commercial purpose is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn

5.11.9 Enqueue
/* Enqueue entry point of the HTB qdisc: classify skb, then either append it
 * to the internal direct queue, hand it to the leaf class's own qdisc, or
 * drop it. Returns NET_XMIT_SUCCESS, NET_XMIT_DROP, or a bypass code. */
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
int ret;
/* HTB private data attached to the qdisc */
struct htb_sched *q = qdisc_priv(sch);
/* classify the packet; yields a class, HTB_DIRECT, or NULL */
struct htb_class *cl = htb_classify(skb, sch, &ret);
if (cl == HTB_DIRECT) {
/* classified as "direct": bypass shaping entirely */
/* enqueue to helper queue */
/* room left in the direct queue? append at the tail */
if (q->direct_queue.qlen < q->direct_qlen) {
__skb_queue_tail(&q->direct_queue, skb);
q->direct_pkts++;
} else {
/* direct queue full: drop the packet */
kfree_skb(skb);
sch->qstats.drops++;
return NET_XMIT_DROP;
}
#ifdef CONFIG_NET_CLS_ACT
/* only with classifier actions can htb_classify() return NULL */
} else if (!cl) {
/* no classification result: account the drop and free the skb */
if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
#endif
/* got a leaf class: enqueue into that class's own inner qdisc */
} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
NET_XMIT_SUCCESS) {
/* inner qdisc refused the packet: account the drop */
sch->qstats.drops++;
cl->qstats.drops++;
return NET_XMIT_DROP;
} else {
/* enqueued: update the class's packet/byte statistics */
cl->bstats.packets++;
cl->bstats.bytes += skb->len;
/* activate the class so dequeue can find it in the self-feed
 * tree; a class in blocked (CANT_SEND) mode is not activated */
htb_activate(q, cl);
}
/* common success path: update qdisc-level queue length and stats */
sch->q.qlen++;
sch->bstats.packets++;
sch->bstats.bytes += skb->len;
return NET_XMIT_SUCCESS;
}
In most cases packets do not enter the direct queue but are enqueued into the qdisc of some leaf class, so whether enqueueing succeeds depends on the flow-control algorithm that leaf uses; most enqueues should succeed. The enqueue path does not involve adjusting class-node modes.
5.11.10 Requeue
/* TODO: requeuing packet charges it to policers again !! */
/* Put a previously dequeued packet back at the head of its queue (e.g.
 * after the driver rejected it). Mirrors htb_enqueue() but inserts at the
 * head and uses the inner qdisc's ->requeue() operation. */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
/* HTB private data */
struct htb_sched *q = qdisc_priv(sch);
int ret = NET_XMIT_SUCCESS;
/* re-classify the packet */
struct htb_class *cl = htb_classify(skb, sch, &ret);
struct sk_buff *tskb;
/* direct-queue packet, or no class found */
if (cl == HTB_DIRECT || !cl) {
/* direct queue has room (and classification really said
 * HTB_DIRECT): push onto the head of the direct queue */
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen && cl) {
__skb_queue_head(&q->direct_queue, skb);
} else {
/* queue full, or classless packet: still insert at the
 * head, then drop one packet from the tail so the queue
 * length stays bounded */
__skb_queue_head(&q->direct_queue, skb);
tskb = __skb_dequeue_tail(&q->direct_queue);
kfree_skb(tskb);
sch->qstats.drops++;
return NET_XMIT_CN;
}
/* leaf class found: requeue into its inner qdisc */
} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
NET_XMIT_SUCCESS) {
/* inner requeue failed: account the drop */
sch->qstats.drops++;
cl->qstats.drops++;
return NET_XMIT_DROP;
} else
/* requeued: re-activate the class so it feeds the tree again */
htb_activate(q, cl);
/* update qdisc queue length and requeue statistics */
sch->q.qlen++;
sch->qstats.requeues++;
return NET_XMIT_SUCCESS;
}
5.11.11 Dequeue
HTB dequeue is a very complex process; the function call chain is:
htb_dequeue
-> __skb_dequeue
-> htb_do_events
-> htb_safe_rb_erase
-> htb_change_class_mode
-> htb_add_to_wait_tree
-> htb_dequeue_tree
-> htb_lookup_leaf
-> htb_deactivate
-> q->dequeue
-> htb_next_rb_node
-> htb_charge_class
-> htb_change_class_mode
-> htb_safe_rb_erase
-> htb_add_to_wait_tree
-> htb_delay_by
/* Dequeue entry point: serve the direct queue first, then scan the
 * levels/priorities of the self-feed trees; if nothing can be sent,
 * arm the watchdog timer with the smallest pending delay. */
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
/* HTB private data */
struct htb_sched *q = qdisc_priv(sch);
int level;
long min_delay;
/* snapshot the jiffies clock for this dequeue pass */
q->jiffies = jiffies;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
/* got one: clear the throttled flag, shorten the queue, done */
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
}
/* nothing queued anywhere: return NULL */
if (!sch->q.qlen)
goto fin;
/* current scheduler clock value */
PSCHED_GET_TIME(q->now);
/* smallest event delay found during the scan below */
min_delay = LONG_MAX;
q->nwc_hit = 0;
/* scan every level of the hierarchy, leaves (level 0) first */
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
long delay;
/* is the earliest pending mode-change event on this level due? */
if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
/* due: process events; returns the delay until the
 * next one (0 when the wait queue emptied) */
delay = htb_do_events(q, level);
/* refresh this level's nearest-event cache */
q->near_ev_cache[level] =
q->jiffies + (delay ? delay : HZ);
} else
/* not yet due: remaining time until it is */
delay = q->near_ev_cache[level] - q->jiffies;
/* track the minimum delay over all levels */
if (delay && min_delay > delay)
min_delay = delay;
/* invert row_mask so ffz() below finds the SET bits of
 * row_mask[level]; a set bit means that prio tree has data */
m = ~q->row_mask[level];
while (m != (int)(-1)) {
/* lowest zero bit of m = next active priority; a
 * smaller prio value dequeues first */
int prio = ffz(m);
/* mark this priority as handled */
m |= 1 << prio;
/* try to pull a packet from that priority's tree; this
 * is where HTB's actual rate control takes effect */
skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL)) {
/* success: update counters, clear throttling,
 * and return the packet */
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
goto fin;
}
}
}
/* queue is non-empty but nothing could be sent: every class is
 * throttled. Arm the timer with min_delay, capped at 5 seconds. */
htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
return skb;
}
/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level)
{
struct sk_buff *skb = NULL;
struct htb_class *cl, *start;
/* look initial class up in the row */
/* starting leaf class for this level/priority in DRR order */
start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
do {
next:
BUG_TRAP(cl);
/* no class at all: nothing to dequeue */
if (!cl)
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
qdisc drops packets in enqueue routine or if someone used
graft operation on the leaf since last dequeue;
simply deactivate and skip such class */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
/* this leaf holds no packets: deactivate it */
htb_deactivate(q, cl);
/* row/level might become empty */
/* bit cleared => no active class left at this
 * level/prio: give up */
if ((q->row_mask[level] & (1 << prio)) == 0)
return NULL;
/* otherwise look up the next leaf in this row */
next = htb_lookup_leaf(q->row[level] + prio,
prio, q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
/* if we just deactivated the loop anchor, move it along */
if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
/* inner loop via goto until a non-empty leaf is found */
goto next;
}
/* non-empty leaf: dequeue from its inner qdisc; whatever shaping
 * that qdisc implements (e.g. tbf) applies here */
skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
/* got a packet: leave the loop and return it */
if (likely(skb != NULL))
break;
/* a non-empty inner qdisc returned nothing: warn once per class */
if (!cl->warned) {
printk(KERN_WARNING
"htb: class %X isn't work conserving ?!\n",
cl->classid);
/* remember that the warning was printed */
cl->warned = 1;
}
/* count the non-work-conserving hit */
q->nwc_hit++;
/* advance the DRR pointer: the parent's inner feed pointer for
 * levels > 0, the row pointer for leaves */
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
ptr[0]) + prio);
/* and look up the next candidate leaf */
cl = htb_lookup_leaf(q->row[level] + prio, prio,
q->ptr[level] + prio,
q->last_ptr_id[level] + prio);
/* stop once we wrap around to the starting class without
 * having obtained a packet */
} while (cl != start);
if (likely(skb != NULL)) {
/* charge the packet length against the class's DRR deficit */
if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
/* deficit exhausted: top it up by one quantum and
 * rotate to the next class in round-robin order */
cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
ptr[0]) + prio);
}
/* this used to be after charge_class but this constelation
gives us slightly better performance */
/* leaf drained by this dequeue: deactivate it */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
/* charge tokens on the class and all its ancestors and adjust
 * their cmode accordingly */
htb_charge_class(q, cl, level, skb->len);
}
return skb;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointers points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
struct rb_node **pptr, u32 * pid)
{
int i;
/* explicit stack of per-depth walk state, used to descend the class
 * hierarchy without recursion */
struct {
/* root of the subtree being walked at this depth */
struct rb_node *root;
/* feed pointer (current position) at this depth */
struct rb_node **pptr;
/* class-id hint used to recover an invalidated pointer */
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
BUG_TRAP(tree->rb_node);
/* initialise the bottom stack frame from the caller's row */
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
/* bounded loop: guards against looping forever on a corrupted tree */
for (i = 0; i < 65535; i++) {
/* feed pointer invalid but a class-id hint exists: recover
 * the pointer (or the next node) from the id */
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
the original or next ptr */
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now so that remove this hint as it
can become out of date quickly */
/* still no pointer: wrap to the leftmost (smallest) node */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
*sp->pptr = (*sp->pptr)->rb_left;
/* not at the bottom frame: pop one level and advance
 * the parent's feed pointer */
if (sp > stk) {
sp--;
BUG_TRAP(*sp->pptr);
/* parent pointer gone too: no leaf found (exit 1) */
if (!*sp->pptr)
return NULL;
htb_next_rb_node(sp->pptr);
}
} else {
struct htb_class *cl;
/* class the current feed pointer refers to at this prio */
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
/* level 0 means leaf: found it (exit 2) */
if (!cl->level)
return cl;
/* inner class: push a frame for its feed tree and
 * continue the descent */
(++sp)->root = cl->un.inner.feed[prio].rb_node;
sp->pptr = cl->un.inner.ptr + prio;
sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
/* loop bound exhausted without reaching a leaf: should not happen */
BUG_TRAP(0);
return NULL;
}
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock that event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
int level, int bytes)
{
long toks, diff;
enum htb_cmode old_mode;
/* Token-bucket update: T = token counter, B = burst buffer, R = rate.
 * Adds the elapsed-time credit (diff), clamps to the buffer size,
 * subtracts the transmission cost of "bytes" at rate R, and floors
 * the result at 1 - mbuffer. */
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
if (toks > cl->B) toks = cl->B; \
toks -= L2T(cl, cl->R, bytes); \
if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
cl->T = toks
/* walk from the charged class up to the root */
while (cl) {
/* time elapsed since this class's last token update */
diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
if (cl->level >= level) {
/* at or above the borrow level: charge the rate bucket */
/* the class at exactly "level" lent the bandwidth */
if (cl->level == level)
cl->xstats.lends++;
HTB_ACCNT(tokens, buffer, rate);
} else {
/* below the borrow level: this class borrowed */
cl->xstats.borrows++;
/* t_c is moved forward below, so credit tokens here */
cl->tokens += diff; /* we moved t_c; update tokens */
}
/* the ceil bucket is charged in every case */
HTB_ACCNT(ctokens, cbuffer, ceil);
cl->t_c = q->now;
/* save the old mode, then recompute it from the fresh token
 * counts; diff is reset to 0 because the buckets already
 * include the elapsed-time credit. When the class cannot send,
 * htb_change_class_mode() leaves the (negated) token shortfall
 * in diff for use as the wait delay. */
old_mode = cl->cmode;
diff = 0;
htb_change_class_mode(q, cl, &diff);
/* mode changed: fix up wait-queue membership */
if (old_mode != cl->cmode) {
/* was waiting (not CAN_SEND): remove from the wait tree */
if (old_mode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
/* still cannot send: re-insert with the new delay */
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
#ifdef HTB_RATECM
/* update rate counters */
cl->sum_bytes += bytes;
cl->sum_packets++;
#endif
/* update byte stats except for leaves which are already updated */
if (cl->level) {
cl->bstats.bytes += bytes;
cl->bstats.packets++;
}
cl = cl->parent;
}
}
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way how to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
/* mode implied by the current token counts (+ the diff credit) */
enum htb_cmode new_mode = htb_class_mode(cl, diff);
/* mode unchanged: nothing to do */
if (new_mode == cl->cmode)
return;
/* prio_activity != 0 means the class is active in some feed tree,
 * so its feed-list linkage must be rebuilt around the mode change */
if (cl->prio_activity) { /* not necessary: speed optimization */
/* unlink from the feeds unless it already could not send */
if (cl->cmode != HTB_CANT_SEND)
htb_deactivate_prios(q, cl);
/* record the new mode */
cl->cmode = new_mode;
/* relink unless the new mode forbids sending */
if (new_mode != HTB_CANT_SEND)
htb_activate_prios(q, cl);
} else
/* inactive class: just record the new mode */
cl->cmode = new_mode;
}
/**
 * htb_class_mode - computes and returns current class mode
 *
 * Computes cl's mode at time cl->t_c + *diff and returns it. If the mode
 * is not HTB_CAN_SEND, *diff is overwritten with the (negated) token
 * shortfall, i.e. the time until cl will change its state.
 * Note that the mode does not flip exactly at {c,}tokens == 0: there is a
 * hysteresis band of 0 .. -cl->{c,}buffer, which limits the number of
 * mode transitions per time unit (roughly a 1/6 speed gain).
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
long ceil_toks, rate_toks;

/* Ceil bucket first: below the low-water mark the class must not
 * send at all, regardless of its rate bucket. */
ceil_toks = cl->ctokens + *diff;
if (ceil_toks < htb_lowater(cl)) {
*diff = -ceil_toks;
return HTB_CANT_SEND;
}

/* Rate bucket: at or above the high-water mark the class may send
 * on its own allocation. */
rate_toks = cl->tokens + *diff;
if (rate_toks >= htb_hiwater(cl))
return HTB_CAN_SEND;

/* In between: the class may only send by borrowing from ancestors. */
*diff = -rate_toks;
return HTB_MAY_BORROW;
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
struct htb_class *cl, long delay)
{
/* start descending from the root of this level's wait tree */
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
/* absolute event time: now plus the delay converted to jiffies */
cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
/* make the event strictly in the future: at least one jiffy away */
if (cl->pq_key == q->jiffies)
cl->pq_key++;
/* update the nearest event cache */
/* keep near_ev_cache[] at the earliest pending event time */
if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
q->near_ev_cache[cl->level] = cl->pq_key;
/* descend the rb-tree ordered by pq_key: earlier events go left,
 * later (or equal) events go right */
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
if (time_after_eq(cl->pq_key, c->pq_key))
p = &parent->rb_right;
else
p = &parent->rb_left;
}
/* link cl into the wait tree and rebalance */
rb_link_node(&cl->pq_node, parent, p);
rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: applied are events with cl->pq_key <= jiffies.
 */
static long htb_do_events(struct htb_sched *q, int level)
{
int i;
/* process at most 500 events per call to bound the amount of work
 * done inside the dequeue path */
for (i = 0; i < 500; i++) {
struct htb_class *cl;
long diff;
/* earliest pending event on this level's wait tree */
struct rb_node *p = rb_first(&q->wait_pq[level]);
/* empty wait queue: no pending event, no delay */
if (!p)
return 0;
/* HTB class the node belongs to */
cl = rb_entry(p, struct htb_class, pq_node);
/* earliest event still in the future: return the remaining
 * time as the delay */
if (time_after(cl->pq_key, q->jiffies)) {
return cl->pq_key - q->jiffies;
}
/* event is due: safely remove the class from the wait tree */
htb_safe_rb_erase(p, q->wait_pq + level);
/* time elapsed since the class's last token update */
diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
/* recompute the class mode with the elapsed-time credit */
htb_change_class_mode(q, cl, &diff);
/* still not allowed to send: queue it again with a new delay */
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
/* more than 500 events were pending: complain (rate limited) ... */
if (net_ratelimit())
printk(KERN_WARNING "htb: too many events !\n");
/* ... and ask to be called again in 0.1 seconds */
return HZ / 10;
}
/* Throttle the qdisc: arm the watchdog timer so that dequeue is retried
 * after "delay" jiffies, and account the event as a rate overlimit. */
static void htb_delay_by(struct Qdisc *sch, long delay)
{
struct htb_sched *priv = qdisc_priv(sch);
const long max_delay = 5 * HZ;

/* wait at least one jiffy (1/HZ seconds) */
if (delay < 1)
delay = 1;
/* cap the wait at five seconds, complaining (rate limited) if the
 * requested delay exceeded it */
if (unlikely(delay > max_delay)) {
if (net_ratelimit())
printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
delay = max_delay;
}
/* why don't use jiffies here ? because expires can be in past */
mod_timer(&priv->timer, priv->jiffies + delay);
/* mark the qdisc as throttled until the timer fires */
sch->flags |= TCQ_F_THROTTLED;
/* account the overlimit event */
sch->qstats.overlimits++;
}
HTB's flow control is reflected in the token calculations, which change the mode of each class node: in CAN_SEND mode the class may keep sending and remains in the packet-feed tree; in CANT_SEND mode the class is blocked and removed from the feed tree, so it cannot transmit; in MAY_BORROW mode, whether the class stays in the tree and keeps transmitting depends on the spare bandwidth of other nodes — if none is available, the class is effectively blocked even though packets remain queued. This is how HTB realizes traffic-control management.
To understand the meaning of the various parameters of the classification and flow-control algorithms, it is best to read RFC 3290.
...... Continued ......

Linux kernel traffic control (18) Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, for any commercial purposes is strictly prohibited. msn: [email protected] Source:

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 1. Pr

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5.2 F

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5.4 P

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5.11

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 6. Class a

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5. Co

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5.8 G

Copyleft this document owned by yfydz all, the use of GPL, free to copy, reprint, reproduced keep the documents for completeness, Used for any commercial purposes is strictly prohibited. msn: [email protected] Source: http://yfydz.cublog.cn 5.11.