Monitoring virtual memory in the Linux OS
static int ev_end;

/* Whether there was ringbuffer overflow */

static int ev_ovf = 0;

DECLARE_WAIT_QUEUE_HEAD (ev_waitq);

spinlock_t ev_lock = SPIN_LOCK_UNLOCKED;

/* Damn seq_file doesn't update file pos when we return NULL iterator,

* so we first return this one and then NULL on next seqnext() call */

static void *dummy_ptr = &dummy_ptr;

/*** Entry points ***/

/*

* open() handler

*/

static int events_open (struct inode *i, struct file *filp)

{

int ret;

/*

* Ringbuffer is not seekable

*/

nonseekable_open (i, filp);

/*

* Open seq_file and set its initial pos

*/

ret = seq_open (filp, &events_seqop);

if (! ret)

{

struct seq_file *m = filp->private_data;

m->private = filp;

m->index = ev_start;

}

return ret;

}

/*

* poll/epoll() handler

*/

static unsigned events_poll (struct file *filp, struct poll_table_struct *pt)

{

struct seq_file *m = filp->private_data;

unsigned mask = 0;

poll_wait (filp, &ev_waitq, pt);

spin_lock (&ev_lock);

/* Readable when the seq_file position has not caught up with the ringbuffer tail */

if (m->index != (loff_t) ev_end)

mask = POLLIN | POLLRDNORM;

spin_unlock (&ev_lock);

return mask;

}

/*

* Called by seq_file within read() request

*/

static void *events_seqstart (struct seq_file *m, loff_t *pos)

{

struct file *filp = m->private;

spin_lock (&ev_lock);

/*

* Wait for data become available

*/

while (*pos == (loff_t) ev_end)

{

void *err = NULL;

/* Can't schedule while atomic */

spin_unlock (&ev_lock);

if (filp->f_flags & O_NONBLOCK)

err = ERR_PTR(-EAGAIN);

else if (wait_event_interruptible (ev_waitq, *pos != (loff_t) ev_end))

err = ERR_PTR(-ERESTARTSYS);

/*

* There IS a slim chance that we lose the waiting condition

* between waking up and re-acquiring the spinlock - hence the while() loop

*/

spin_lock (&ev_lock);

if (err)

return err;

}

return events + *pos;

}

/*

* Finish read() request

*/

static void events_seqstop (struct seq_file *m, void *p)

{

spin_unlock (&ev_lock);

}

/*

* Iterate to next event

*/

static void *events_seqnext (struct seq_file *m, void *p, loff_t *pos)

{

struct memmon_event *ev;

/* Dummy iterator - time to exit */

if (p == dummy_ptr)

return NULL;

++*pos;

ev = events + *pos;

/* Overflow */

if (ev - events >= NEVENTS)

*pos = 0;

/*

* We reached the end. Decrement file pos ('coz it will be incremented right back)

* and return the dummy iterator (otherwise file pos won't be updated at all)

*/

if (*pos == (loff_t) ev_end)

{

--*pos;

return dummy_ptr;

}

return events + *pos;

}

/*

* Actually prints current iterator to read buffer

*/

static int events_seqprint (struct seq_file *m, void *p)

{

struct memmon_event *ev = p;

if (ev == dummy_ptr)

return 0;

seq_printf (m, "%d:", ev->pid);

switch (ev->type)

{

case MMAP2:

seq_printf (m, "mmap (%p,%u,", ev->mmap2.start, ev->mmap2.len);

if (ev->mmap2.prot & PROT_READ)

seq_puts (m, "r");

else

seq_puts (m, "-");

if (ev->mmap2.prot & PROT_WRITE)

seq_puts (m, "w");

else

seq_puts (m, "-");

if (ev->mmap2.prot & PROT_EXEC)

seq_puts (m, "x,");

else

seq_puts (m, "-,");

if (ev->mmap2.flags & MAP_SHARED)

seq_puts (m, "SHARED");

else if (ev->mmap2.flags & MAP_PRIVATE)

seq_puts (m, "PRIVATE");

if (ev->mmap2.flags & MAP_LOCKED)

seq_puts (m, ",LOCKED");

seq_puts (m, ")\n");

break;

}

return 0;

}
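
The four handlers above are wired together by a seq_operations table (events_seqop, passed to seq_open() in events_open()) and a file_operations table (events_fops, installed on the /proc entry in init_events()). Both definitions fall on a part of the listing that is not shown here; the sketch below shows how they would plausibly look with the standard seq_file plumbing of a 2.6.20 kernel, and is an illustration rather than the author's exact code.

/* Sketch only: the real definitions live elsewhere in the listing */
static struct seq_operations events_seqop =
{
	.start = events_seqstart,
	.next  = events_seqnext,
	.stop  = events_seqstop,
	.show  = events_seqprint
};

static const struct file_operations events_fops =
{
	.owner   = THIS_MODULE,
	.open    = events_open,
	.read    = seq_read,
	.poll    = events_poll,
	.release = seq_release
};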

/*** Exported entries ***/

/*

* Initializes event ringbuffer & creates /proc entry

*/

int init_events(void)

{

struct proc_dir_entry *entry;

buflen = max (buflen, MIN_EVENTS_BUFLEN);

events = kzalloc (buflen, GFP_KERNEL);

if (! events)

{

printk ("memmon: Event ringbuffer too big!\n");

return 0;

}

ev_start = ev_end = 0;

entry = create_proc_entry (EVENTS_ENTRY, 0444, procdir);

if (entry)

entry->proc_fops = &events_fops;

else

{

kfree(events);

return 0;

}

return 1;

}

/*

* Destroys ringbuffer & removes /proc entry

*/

void fini_events(void)

{

remove_proc_entry (EVENTS_ENTRY, procdir);

kfree(events);

}

/*

* Adds events to ringbuffer tail

*/

void put_event (const struct memmon_event *ev)

{

spin_lock (&ev_lock);

events [ev_end] = *ev;

/* Overflow */

if (++ev_end >= NEVENTS)

{

ev_start = ev_end = 0;

ev_ovf = 1;

}

/*

* If overflow happened at least once, ev_start must be next to ev_end.

* Otherwise, it remains zero.

*/

if (ev_ovf && ++ev_start >= NEVENTS)

ev_start = 0;

spin_unlock (&ev_lock);

wake_up_interruptible_sync (&ev_waitq);

}
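
From user space the ringbuffer is consumed simply by reading the /proc entry created in init_events(). A minimal reader is sketched below; the entry name "events" is an assumption, since EVENTS_ENTRY is defined in a part of the listing not shown here, and only the "memmon" directory name is fixed by PROCDIR in common.h. As events_seqstart() shows, a blocking read sleeps on ev_waitq until put_event() adds data, while a reader that opened the file with O_NONBLOCK gets -EAGAIN instead.

/* User-space sketch, not part of the module.
 * The entry name "events" is assumed; only the "memmon" directory is known. */
#include <stdio.h>

int main (void)
{
	char line[256];
	FILE *f = fopen ("/proc/memmon/events", "r");

	if (! f)
	{
		perror ("fopen");
		return 1;
	}

	/* Each fgets() blocks in events_seqstart() until put_event() wakes ev_waitq */
	while (fgets (line, sizeof line, f))
		fputs (line, stdout);

	fclose (f);
	return 0;
}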

watch-pids.h

/*

* Selection of PIDs to watch for.

*/

#ifndef MEMMON_WATCH_PIDS_H

#define MEMMON_WATCH_PIDS_H

/*

* Checks whether PID @pid is present in PID set

* Returns 1 if present

*/

int pid_present (pid_t pid);

/*

* Initializes PID set & creates /proc entry

*/

int init_watch_pids(void);

/*

* Destroys PID set & removes /proc entry

*/

void fini_watch_pids(void);

#endif // MEMMON_WATCH_PIDS_H

watch-pids.c

/*

* Selection of PIDs to watch for.

*/

#include <linux/module.h>

#include <linux/moduleparam.h>

#include <linux/kernel.h>

#include <linux/proc_fs.h>

#include <linux/bitmap.h>

#include <asm/uaccess.h>

#include <asm/bitops.h>

#include <linux/threads.h>

#include <linux/slab.h>

#include "common.h"

#include "watch-pids.h"

/*** Forward declarations ***/

static int watch_pids_open (struct inode *i, struct file *filp);

static int watch_pids_release (struct inode *i, struct file *filp);

static ssize_t watch_pids_read (struct file *filp, char __user *buf, size_t count, loff_t *off);

static ssize_t watch_pids_write (struct file *filp, const char __user *buf,

size_t count, loff_t *offp);

/*** Internal data ***/

/* Filename in procfs directory */

#define WATCHPID_ENTRY "watch-pids"

#define PID_COUNT (PID_MAX_DEFAULT + 1)

/* PIDs are stored in one single bitmap of PID_COUNT (PID_MAX_DEFAULT + 1) entries.

* This is VERY RARELY a limitation */

static DECLARE_BITMAP (watched_pids, PID_COUNT);

/*** File operations ***/

static const struct file_operations watch_pids_fops =

{

.owner = THIS_MODULE,

.open = watch_pids_open,

.read = watch_pids_read,

.write = watch_pids_write,

.release = watch_pids_release

};

/*** Entry points ***/

/*

* open() handler

*/

static int watch_pids_open (struct inode *i, struct file *filp)

{

try_module_get (THIS_MODULE);

/*

* If file opened for read, print PID set to internal buffer

*/

if (filp->f_mode & FMODE_READ)

{

const int FDATA_SIZ = 32*1024;

char *fdata;

int len;

/*

* Disallow mixed RW-access

*/

if (filp->f_mode & FMODE_WRITE)

{

module_put (THIS_MODULE);

return -EINVAL;

}

fdata = kzalloc (FDATA_SIZ, GFP_KERNEL);

if (! fdata)

{

module_put (THIS_MODULE);

return -ENOMEM;

}

len = bitmap_scnlistprintf (fdata, FDATA_SIZ - 1,

watched_pids, PID_COUNT);

/* Append \n */

if (len)

{

fdata [len++] = '\n';

fdata[len] = 0;

}

filp->private_data = fdata;

}

return 0;

}

/*

* close() handler

*/

static int watch_pids_release (struct inode *i, struct file *filp)

{

module_put (THIS_MODULE);

if (filp->private_data)

kfree (filp->private_data);

return 0;

}

/*

* read() handler - simply return chunk of data from

* previously allocated and formatted buffer

*/

static ssize_t watch_pids_read (struct file *filp, char __user *buf,

size_t count, loff_t *offp)

{

size_t len = strlen (filp->private_data);

char *fdata = filp->private_data;

if (*offp >= len)

return 0;

len = min (count, len - (size_t) (*offp));

if (copy_to_user (buf, fdata + (*offp), len))

return -EFAULT;

*offp += len;

return len;

}

/*

* write() handler

* Buffer must hold an ASCII representation of a single integer:

* if positive, its value is the PID to add to the set;

* if negative, its absolute value is the PID to remove from the set;

* if zero, the PID set is cleared

*/

static ssize_t watch_pids_write (struct file *filp, const char __user *buf,

size_t count, loff_t *offp)

{

const size_t maxlen = 4096;

size_t len;

pid_t new_pid;

char *data;

ssize_t res = -ENOMEM;

/* copy up to one page to our buffer */

len = min (maxlen, count);

data = kzalloc (len, GFP_KERNEL);

if (unlikely(! data))

return -ENOMEM;

if (copy_from_user (data, buf, len))

res = -EFAULT;

else if ((sscanf (data, "%d", &new_pid) == 1) &&

new_pid < PID_COUNT && new_pid > -PID_COUNT)

{

if (new_pid > 0)

set_bit (new_pid, watched_pids);

else if (new_pid < 0)

clear_bit (-new_pid, watched_pids);

else

bitmap_zero (watched_pids, PID_COUNT);

res = len;

}

else

/* buffer doesn't represent a number in PID range */

res = -EIO;

kfree(data);

return res;

}
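
Putting the protocol above together: a user-space helper only has to open /proc/memmon/watch-pids (PROCDIR plus WATCHPID_ENTRY) for writing and send one decimal number per write(). The sketch below is an illustration of that interface, not part of the module itself.

/* User-space sketch, not part of the module */
#include <stdio.h>
#include <unistd.h>

static int watch_ctl (const char *cmd)
{
	/* Write-only open: watch_pids_open() rejects mixed read/write access */
	FILE *f = fopen ("/proc/memmon/watch-pids", "w");

	if (! f)
		return -1;
	fputs (cmd, f);		/* one ASCII integer per write() */
	return fclose (f);
}

int main (void)
{
	char buf[32];

	snprintf (buf, sizeof buf, "%d", getpid ());
	watch_ctl (buf);	/* add this PID to the set */
	/* ... run the workload being traced ... */
	snprintf (buf, sizeof buf, "-%d", getpid ());
	watch_ctl (buf);	/* remove it again */
	watch_ctl ("0");	/* or clear the whole set */
	return 0;
}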

/*** Exported entries ***/

/*

* Checks whether PID @pid is present in PID set

* Returns 1 if present

*/

int pid_present (pid_t pid)

{

if (pid >= PID_COUNT || pid < 0)

return 0;

return test_bit (pid, watched_pids) ? 1 : 0;

}

/*

* Initializes PID set & creates /proc entry

*/

int init_watch_pids(void)

{

struct proc_dir_entry *entry;

entry = create_proc_entry (WATCHPID_ENTRY, 0666, procdir);

if (entry)

entry->proc_fops = &watch_pids_fops;

else

return 0;

bitmap_zero (watched_pids, PID_COUNT);

return 1;

}

/*

* Destroys PID set & removes /proc entry

*/

void fini_watch_pids(void)

{

remove_proc_entry (WATCHPID_ENTRY, procdir);

}

syscalls.h

/*

* Syscall capture facility.

*/

#ifndef MEMMON_SYSCALLS_H

#define MEMMON_SYSCALLS_H

/*

* Installs handlers.

*/

int capture_syscalls(void);

/*

* Uninstalls handlers

*/

void restore_syscalls(void);

#endif //MEMMON_SYSCALLS_H

syscalls.c

/*

* Syscall capture facility.

*/

#include <linux/module.h>

#include <linux/moduleparam.h>

#include <linux/kernel.h>

#include <linux/proc_fs.h>

#include <linux/sched.h>

#include <asm/unistd.h>

#include "common.h"

#include "syscalls.h"

#include "events.h"

#include "watch-pids.h"

/*** Syscalls ***/

/*

* They just put an appropriate event into ringbuffer

*/

asmlinkage void sys2_mmap2 (void __user *start, size_t length,

unsigned long prot, unsigned long flags,

unsigned long fd, unsigned long pgoff)

{

struct memmon_event ev = {.type = MMAP2, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.mmap2.start = start;

ev.mmap2.len = length;

ev.mmap2.prot = prot;

ev.mmap2.flags = flags;

ev.mmap2.fd = fd;

ev.mmap2.off = pgoff;

put_event(&ev);

}

asmlinkage void sys2_mmap2_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "mmap";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_munmap (void __user *start, size_t length)

{

struct memmon_event ev = {.type = MUNMAP, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.munmap.start = start;

ev.munmap.len = length;

put_event(&ev);

}

asmlinkage void sys2_munmap_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "munmap";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_mremap (void __user *addr1, size_t length1,

unsigned long length2, unsigned long flags,

void __user *addr2)

{

struct memmon_event ev = {.type = MREMAP, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.mremap.start[0] = addr1;

ev.mremap.start[1] = addr2;

ev.mremap.len[0] = length1;

ev.mremap.len[1] = length2;

ev.mremap.flags = flags;

put_event(&ev);

}

asmlinkage void sys2_mremap_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "mremap";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_mlock (void __user *start, size_t length)

{

struct memmon_event ev = {.type = MLOCK, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.mlock.start = start;

ev.mlock.len = length;

put_event(&ev);

}

asmlinkage void sys2_mlock_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "mlock";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_munlock (void __user *start, size_t length)

{

struct memmon_event ev = {.type = MUNLOCK, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.munlock.start = start;

ev.munlock.len = length;

put_event(&ev);

}

asmlinkage void sys2_munlock_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "munlock";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_mlockall (unsigned long flags)

{

struct memmon_event ev = {.type = MLOCKALL, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.mlockall.flags = flags;

put_event(&ev);

}

asmlinkage void sys2_mlockall_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "mlockall";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_munlockall()

{

struct memmon_event ev = {.type = MUNLOCKALL, .pid = current->pid};

if (! pid_present (ev.pid)) return;

put_event(&ev);

}

asmlinkage void sys2_munlockall_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "munlockall";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_brk (void __user *start)

{

struct memmon_event ev = {.type = BRK, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.brk.addr = start;

put_event(&ev);

}

asmlinkage void sys2_brk_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "brk";

ev.callret.ret = ret;

put_event(&ev);

}

asmlinkage void sys2_fsync (int fd)

{

struct memmon_event ev = {.type = FSYNC, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.fsync.fd = fd;

put_event(&ev);

}

asmlinkage void sys2_fsync_exit (long ret)

{

struct memmon_event ev = {.type = SYSCALLRET, .pid = current->pid};

if (! pid_present (ev.pid)) return;

ev.callret.callname = "fsync";

ev.callret.ret = ret;

put_event(&ev);

}

/*** Handler tables ***/

/* Kernel syscall table */

extern void *sys_call_table[];

/* Our table w/saved offsets */

void *old_sys_call [NR_syscalls];

/* Our pre-call handlers */

void *sys_call_trap [NR_syscalls];

/* Our post-call handlers */

void *sys_call_exit [NR_syscalls];

/*

* Struct describing our handler

*/

struct syscall_handler

{

/* Syscall nr */

int nr;

/* Pre-call & post-call handler */

void *hand1, *hand2;

};

#define SYSCALL_HANDLER(name) {__NR_##name, sys2_##name, sys2_##name##_exit}

#define SYSCALL_HANDLERS_END() {0, 0, 0}

/*

* Main handler table

* Each SYSCALL_HANDLER(name) entry installs handlers

* sys2_name/sys2_name_exit for the sys_name call.

*/

struct syscall_handler syscalls[] =

{

SYSCALL_HANDLER(mmap2),

SYSCALL_HANDLER(munmap),

SYSCALL_HANDLER(mremap),

SYSCALL_HANDLER(mlock),

SYSCALL_HANDLER(munlock),

SYSCALL_HANDLER(mlockall),

SYSCALL_HANDLER(munlockall),

SYSCALL_HANDLER(brk),

SYSCALL_HANDLER(fsync),

SYSCALL_HANDLERS_END()

};

/* Located in syscall-entry.S */

void syscalls_entry(void);

/*** Exported entries ***/

/*

* Installs handlers.

*/

int capture_syscalls(void)

{

int i;

for (i = 0; syscalls[i].hand1; ++i)

{

int nr = syscalls[i].nr;

sys_call_trap[nr] = syscalls[i].hand1;

sys_call_exit[nr] = syscalls[i].hand2;

old_sys_call[nr] = sys_call_table[nr];

sys_call_table[nr] = syscalls_entry;

}

return 1;

}

/*

* Uninstalls handlers

*/

void restore_syscalls(void)

{

int i;

for (i = 0; syscalls[i].hand1; ++i)

{

int nr = syscalls[i].nr;

sys_call_table[nr] = old_sys_call[nr];

}

}

syscalls-entry.S

/*

* Syscall entry/exit capture

*/

#include "offsets.h"

/* Entry handler table */

.extern sys_call_trap

/* Exit handler table */

.extern sys_call_exit

/* Global entry for our syscalls */

syscalls_entry:

/* Save registers in order syscall handlers expect 'em */

pushl %eax

pushl %ebp

pushl %edi

pushl %esi

pushl %edx

pushl %ecx

pushl %ebx

/* Save eax */

movl %eax, TI_stk0 (%ebp)

/* Call our handler */

call *sys_call_trap (,%eax, 4)

/* Fake return address */

movl 28 (%esp),%eax

movl %eax, TI_stk0 + 4 (%ebp)

movl $sysreturn, 28 (%esp)

/* Restore context */

popl %ebx

popl %ecx

popl %edx

popl %esi

popl %edi

popl %ebp

popl %eax

/* Jump to default system handler */

jmpl *old_sys_call (,%eax, 4)

sysreturn:

/* Save registers */

pushal

/* Pass new %eax to exit handler */

pushl %eax

/* Restore original %eax */

movl TI_stk0 (%ebp),%eax

/* Call our exit handler */

call *sys_call_exit (,%eax, 4)

/* Restore context */

popl %eax

popal

/* Jump back to syscall dispatcher entry */

jmpl *TI_stk0 + 4 (%ebp)

.globl syscalls_entry
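
For readability, here is roughly what the trampoline above does, expressed as plain C. This is only an illustration: the real code has to run inside the i386 syscall register convention (arguments in registers, return path through the original dispatcher), which is exactly why it is written in assembly. The function name dispatch_sketch is hypothetical.

/* C-level illustration of syscalls_entry/sysreturn; not compiled into the module */
typedef void (*trap_fn) (long, long, long, long, long, long);
typedef long (*call_fn) (long, long, long, long, long, long);
typedef void (*exit_fn) (long);

static long dispatch_sketch (int nr, long a1, long a2, long a3,
                             long a4, long a5, long a6)
{
	long ret;

	/* pre-call hook installed by capture_syscalls() */
	((trap_fn) sys_call_trap[nr]) (a1, a2, a3, a4, a5, a6);

	/* original kernel handler saved in old_sys_call[] */
	ret = ((call_fn) old_sys_call[nr]) (a1, a2, a3, a4, a5, a6);

	/* post-call hook receives the syscall's return value */
	((exit_fn) sys_call_exit[nr]) (ret);

	return ret;
}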

gen-offsets.c

//

#define __KERNEL__

/* bugoga */

#include <linux/kernel.h>

#include <linux/autoconf.h>

#include <linux/thread_info.h>

#include <stdio.h>

int main()

{

printf ("#define TI_stk0 %d\n", (int) offsetof (struct thread_info, supervisor_stack));

return 0;

}

mm-fault.h

/*

* Pagefault interception.

*/

#ifndef MEMMON_MM_FAULT_H

#define MEMMON_MM_FAULT_H

/*

* Install pagefault handler

*/

void capture_mmfault(void);

/*

* Uninstall handler

*/

void release_mmfault(void);

#endif // MEMMON_MM_FAULT_H

mm-fault.c

/*

* Pagefault interception.

*/

#include <linux/module.h>

#include <linux/moduleparam.h>

#include <linux/kernel.h>

#include <linux/mm.h>

#include "common.h"

#include "mm-fault.h"

#include "events.h"

#include "watch-pids.h"

/*

* Dirty kernel hack: PF hook that is called every time

* some process page-faults on a page that BELONGS to its VMA space.

*/

extern void (*mm_handle_fault_hook) (struct mm_struct *mm, struct vm_area_struct *vma,

void __user *address, pte_t *pte,

pmd_t *pmd, int write_access);

/*

* Pagefault handler

*/

void mm_handle_fault (struct mm_struct *mm, struct vm_area_struct *vma,

void __user *address, pte_t *pte,

pmd_t *pmd, int write_access)

{

struct memmon_event ev = {.pid = current->pid};

pte_t entry = *pte;

/*

* If PF happened due to R/W or U/S access violation, ignore it

*/

if (! pid_present (current->pid) || pte_present(entry))

return;

/*

* Faulted page is either backed by swapfile, some shared executable file

* or no file yet at all (anonymous page)

*/

if (pte_none(entry))

ev.type = ANON_PF;

else if (pte_file(entry))

ev.type = FILE_PF;

else

ev.type = SWAP_PF;

ev.pagefault.addr = address;

ev.pagefault.write = write_access;

put_event(&ev);

}

/*** Exported entries ***/

/*

* Install pagefault handler

*/

void capture_mmfault(void)

{

mm_handle_fault_hook = mm_handle_fault;

}

/*

* Uninstall handler

*/

void release_mmfault(void)

{

mm_handle_fault_hook = NULL;

}

common.h

/*

* Common defines and global data

*/

#ifndef MEMMON_COMMON_H

#define MEMMON_COMMON_H

/* procfs directory name */

#define PROCDIR "memmon"

/*

* procfs directory entry

*/

extern struct proc_dir_entry *procdir;

#endif // MEMMON_COMMON_H

Makefile

#

ifneq ($(KERNELRELEASE),)

obj-m := memmon.o

memmon-objs := mmon.o events.o watch-pids.o syscalls.o syscalls-entry.o mm-fault.o

else

KERNELDIR ?= /lib/modules/$(shell uname -r)/build

PWD := $(shell pwd)

all: offsets.h modules

offsets.h: $(KERNELDIR)/include/asm/thread_info.h
	$(MAKE) gen-offsets
	./gen-offsets > offsets.h
	$(RM) gen-offsets

clean modules:
	$(MAKE) -C $(KERNELDIR) M=$(PWD) $(MAKECMDGOALS)

.PHONY: modules
.DEFAULT: all

endif

Patch for the kernel (2.6.20.1)

diff -arNC 3 linux_2.6.20.1_j/kernel/kallsyms.c linux_2.6.20.1_a/kernel/kallsyms.c

*** linux_2.6.20.1_j/kernel/kallsyms.c 2007-02-20 09:34:32.000000000 +0300

--- linux_2.6.20.1_a/kernel/kallsyms.c 2007-05-26 22:27:23.000000000 +0400

***************

*** 452,454 ****

--- 452,460 ----

__initcall (kallsyms_init);

EXPORT_SYMBOL (__print_symbol);

+

+ /* HACK */

+

+ extern void *sys_call_table[];

+

+ EXPORT_SYMBOL_GPL (sys_call_table);

diff -arNC 3 linux_2.6.20.1_j/mm/memory.c linux_2.6.20.1_a/mm/memory.c

*** linux_2.6.20.1_j/mm/memory.c 2007-02-20 09:34:32.000000000 +0300

--- linux_2.6.20.1_a/mm/memory.c 2007-05-28 22:08:41.000000000 +0400

***************

*** 2369,2374 ****

--- 2378,2390 ----

return VM_FAULT_MAJOR;

}

+ /* DIRTY HACK */

+ void (*mm_handle_fault_hook) (struct mm_struct *mm,

+ struct vm_area_struct *vma, unsigned long address,

+ pte_t *pte, pmd_t *pmd, int write_access) = NULL;

+

+ EXPORT_SYMBOL_GPL (mm_handle_fault_hook);

+

/*

* These routines also need to handle stuff like marking pages dirty

* and/or accessed for architectures that don't do it in hardware (most

***************

*** 2390,2395 ****

--- 2406,2414 ----

pte_t old_entry;

spinlock_t *ptl;

+ if (mm_handle_fault_hook)

+ mm_handle_fault_hook (mm, vma, address, pte, pmd, write_access);

+

old_entry = entry = *pte;

if (! pte_present(entry)) {

if (pte_none(entry)) {
