Project

General

Profile

Bug #781 » fix-modules-strcpy.patch

Mathieu Desnoyers, 04/14/2014 03:20 PM

View differences:

lib/ringbuffer/backend.h
165 165
	ctx->buf_offset += len;
166 166
}
167 167

  
168
/*
169
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
170
 * terminating character is found in @src. Returns the number of bytes
171
 * copied. Does *not* terminate @dest with NULL terminating character.
172
 */
173
static inline
174
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
175
		char *dest, const char *src, size_t len)
176
{
177
	size_t count;
178

  
179
	for (count = 0; count < len; count++) {
180
		char c;
181

  
182
		/*
183
		 * Only read source character once, in case it is
184
		 * modified concurrently.
185
		 */
186
		c = ACCESS_ONCE(src[count]);
187
		if (!c)
188
			break;
189
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
190
	}
191
	return count;
192
}
193

  
194
/*
195
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
196
 * terminating character is found in @src, or when a fault occurs.
197
 * Returns the number of bytes copied. Does *not* terminate @dest with
198
 * NULL terminating character.
199
 *
200
 * This function deals with userspace pointers, it should never be called
201
 * directly without having the src pointer checked with access_ok()
202
 * previously.
203
 */
204
static inline
205
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
206
		char *dest, const char __user *src, size_t len)
207
{
208
	size_t count;
209

  
210
	for (count = 0; count < len; count++) {
211
		int ret;
212
		char c;
213

  
214
		ret = __get_user(c, &src[count]);
215
		if (ret || !c)
216
			break;
217
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
218
	}
219
	return count;
220
}
221

  
222
/**
223
 * lib_ring_buffer_strcpy - write string data to a buffer backend
224
 * @config : ring buffer instance configuration
225
 * @ctx: ring buffer context. (input arguments only)
226
 * @src : source pointer to copy from
227
 * @len : length of data to copy
228
 * @pad : character to use for padding
229
 *
230
 * This function copies @len - 1 bytes of string data from a source
231
 * pointer to a buffer backend, followed by a terminating '\0'
232
 * character, at the current context offset. This is more or less a
233
 * buffer backend-specific strncpy() operation. If a terminating '\0'
234
 * character is found in @src before @len - 1 characters are copied, pad
235
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
236
 * (_ring_buffer_strcpy) if copy is crossing a page boundary.
237
 */
238
static inline
239
void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
240
			   struct lib_ring_buffer_ctx *ctx,
241
			   const char *src, size_t len, int pad)
242
{
243
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
244
	struct channel_backend *chanb = &ctx->chan->backend;
245
	size_t sbidx, index;
246
	size_t offset = ctx->buf_offset;
247
	ssize_t pagecpy;
248
	struct lib_ring_buffer_backend_pages *rpages;
249
	unsigned long sb_bindex, id;
250

  
251
	if (unlikely(!len))
252
		return;
253
	offset &= chanb->buf_size - 1;
254
	sbidx = offset >> chanb->subbuf_size_order;
255
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
256
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
257
	id = bufb->buf_wsb[sbidx].id;
258
	sb_bindex = subbuffer_id_get_index(config, id);
259
	rpages = bufb->array[sb_bindex];
260
	CHAN_WARN_ON(ctx->chan,
261
		     config->mode == RING_BUFFER_OVERWRITE
262
		     && subbuffer_id_is_noref(config, id));
263
	if (likely(pagecpy == len)) {
264
		size_t count;
265

  
266
		count = lib_ring_buffer_do_strcpy(config,
267
					rpages->p[index].virt
268
					    + (offset & ~PAGE_MASK),
269
					src, len - 1);
270
		offset += count;
271
		/* Padding */
272
		if (unlikely(count < len - 1)) {
273
			size_t pad_len = len - 1 - count;
274

  
275
			lib_ring_buffer_do_memset(rpages->p[index].virt
276
						+ (offset & ~PAGE_MASK),
277
					pad, pad_len);
278
			offset += pad_len;
279
		}
280
		/* Ending '\0' */
281
		lib_ring_buffer_do_memset(rpages->p[index].virt
282
					+ (offset & ~PAGE_MASK),
283
				'\0', 1);
284
	} else {
285
		_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
286
	}
287
	ctx->buf_offset += len;
288
}
289

  
168 290
/**
169 291
 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
170 292
 * @config : ring buffer instance configuration
......
239 361
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
240 362
}
241 363

  
364
/**
365
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
366
 * @config : ring buffer instance configuration
367
 * @ctx: ring buffer context (input arguments only)
368
 * @src : userspace source pointer to copy from
369
 * @len : length of data to copy
370
 * @pad : character to use for padding
371
 *
372
 * This function copies @len - 1 bytes of string data from a userspace
373
 * source pointer to a buffer backend, followed by a terminating '\0'
374
 * character, at the current context offset. This is more or less a
375
 * buffer backend-specific strncpy() operation. If a terminating '\0'
376
 * character is found in @src before @len - 1 characters are copied, pad
377
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
378
 * (_ring_buffer_strcpy_from_user_inatomic) if copy is crossing a page
379
 * boundary. Disable the page fault handler to ensure we never try to
380
 * take the mmap_sem.
381
 */
382
static inline
383
void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
384
		struct lib_ring_buffer_ctx *ctx,
385
		const void __user *src, size_t len, int pad)
386
{
387
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
388
	struct channel_backend *chanb = &ctx->chan->backend;
389
	size_t sbidx, index;
390
	size_t offset = ctx->buf_offset;
391
	ssize_t pagecpy;
392
	struct lib_ring_buffer_backend_pages *rpages;
393
	unsigned long sb_bindex, id;
394
	mm_segment_t old_fs = get_fs();
395

  
396
	if (unlikely(!len))
397
		return;
398
	offset &= chanb->buf_size - 1;
399
	sbidx = offset >> chanb->subbuf_size_order;
400
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
401
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
402
	id = bufb->buf_wsb[sbidx].id;
403
	sb_bindex = subbuffer_id_get_index(config, id);
404
	rpages = bufb->array[sb_bindex];
405
	CHAN_WARN_ON(ctx->chan,
406
		     config->mode == RING_BUFFER_OVERWRITE
407
		     && subbuffer_id_is_noref(config, id));
408

  
409
	set_fs(KERNEL_DS);
410
	pagefault_disable();
411
	if (unlikely(!access_ok(VERIFY_READ, src, len)))
412
		goto fill_buffer;
413

  
414
	if (likely(pagecpy == len)) {
415
		size_t count;
416

  
417
		count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
418
					rpages->p[index].virt
419
					    + (offset & ~PAGE_MASK),
420
					src, len - 1);
421
		offset += count;
422
		/* Padding */
423
		if (unlikely(count < len - 1)) {
424
			size_t pad_len = len - 1 - count;
425

  
426
			lib_ring_buffer_do_memset(rpages->p[index].virt
427
						+ (offset & ~PAGE_MASK),
428
					pad, pad_len);
429
			offset += pad_len;
430
		}
431
		/* Ending '\0' */
432
		lib_ring_buffer_do_memset(rpages->p[index].virt
433
					+ (offset & ~PAGE_MASK),
434
				'\0', 1);
435
	} else {
436
		_lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
437
					len, 0, pad);
438
	}
439
	pagefault_enable();
440
	set_fs(old_fs);
441
	ctx->buf_offset += len;
442

  
443
	return;
444

  
445
fill_buffer:
446
	pagefault_enable();
447
	set_fs(old_fs);
448
	/*
449
	 * In the error path we call the slow path version to avoid
450
	 * the pollution of static inline code.
451
	 */
452
	_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
453
	offset += len - 1;
454
	_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
455
}
456

  
242 457
/*
243 458
 * This accessor counts the number of unread records in a buffer.
244 459
 * It only provides a consistent value if no reads not writes are performed
lib/ringbuffer/backend_internal.h
56 56
extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
57 57
				    size_t offset, int c, size_t len,
58 58
				    ssize_t pagecpy);
59
extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
60
				   size_t offset, const char *src, size_t len,
61
				   ssize_t pagecpy, int pad);
59 62
extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
60 63
					    size_t offset, const void *src,
61 64
					    size_t len, ssize_t pagecpy);
65
extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
66
		size_t offset, const char __user *src, size_t len,
67
		ssize_t pagecpy, int pad);
62 68

  
63 69
/*
64 70
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
lib/ringbuffer/ring_buffer_backend.c
557 557
}
558 558
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
559 559

  
560
/**
561
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
562
 * @bufb : buffer backend
563
 * @offset : offset within the buffer
564
 * @src : source address
565
 * @len : length to write
566
 * @pagecpy : page size copied so far
567
 * @pad : character to use for padding
568
 */
569
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
570
			size_t offset, const char *src, size_t len,
571
			ssize_t pagecpy, int pad)
572
{
573
	struct channel_backend *chanb = &bufb->chan->backend;
574
	const struct lib_ring_buffer_config *config = &chanb->config;
575
	size_t sbidx, index;
576
	struct lib_ring_buffer_backend_pages *rpages;
577
	unsigned long sb_bindex, id;
578
	int src_terminated = 0;
579

  
580
	CHAN_WARN_ON(chanb, !len);
581
	offset += pagecpy;
582
	do {
583
		len -= pagecpy;
584
		if (!src_terminated)
585
			src += pagecpy;
586
		sbidx = offset >> chanb->subbuf_size_order;
587
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
588

  
589
		/*
590
		 * Underlying layer should never ask for writes across
591
		 * subbuffers.
592
		 */
593
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
594

  
595
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
596
		id = bufb->buf_wsb[sbidx].id;
597
		sb_bindex = subbuffer_id_get_index(config, id);
598
		rpages = bufb->array[sb_bindex];
599
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
600
			     && subbuffer_id_is_noref(config, id));
601

  
602
		if (likely(!src_terminated)) {
603
			size_t count, to_copy;
604

  
605
			to_copy = pagecpy;
606
			if (pagecpy == len)
607
				to_copy--;	/* Final '\0' */
608
			count = lib_ring_buffer_do_strcpy(config,
609
					rpages->p[index].virt
610
						+ (offset & ~PAGE_MASK),
611
					src, to_copy);
612
			offset += count;
613
			/* Padding */
614
			if (unlikely(count < to_copy)) {
615
				size_t pad_len = to_copy - count;
616

  
617
				/* Next pages will have padding */
618
				src_terminated = 1;
619
				lib_ring_buffer_do_memset(rpages->p[index].virt
620
						+ (offset & ~PAGE_MASK),
621
					pad, pad_len);
622
				offset += pad_len;
623
			}
624
		} else {
625
			size_t pad_len;
626

  
627
			pad_len = pagecpy;
628
			if (pagecpy == len)
629
				pad_len--;	/* Final '\0' */
630
			lib_ring_buffer_do_memset(rpages->p[index].virt
631
					+ (offset & ~PAGE_MASK),
632
				pad, pad_len);
633
			offset += pad_len;
634
		}
635
	} while (unlikely(len != pagecpy));
636
	/* Ending '\0' */
637
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
638
			'\0', 1);
639
}
640
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
560 641

  
561 642
/**
562 643
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
......
615 696
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
616 697

  
617 698
/**
699
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
700
 * @bufb : buffer backend
701
 * @offset : offset within the buffer
702
 * @src : source address
703
 * @len : length to write
704
 * @pagecpy : page size copied so far
705
 * @pad : character to use for padding
706
 *
707
 * This function deals with userspace pointers, it should never be called
708
 * directly without having the src pointer checked with access_ok()
709
 * previously.
710
 */
711
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
712
		size_t offset, const char __user *src, size_t len,
713
		ssize_t pagecpy, int pad)
714
{
715
	struct channel_backend *chanb = &bufb->chan->backend;
716
	const struct lib_ring_buffer_config *config = &chanb->config;
717
	size_t sbidx, index;
718
	struct lib_ring_buffer_backend_pages *rpages;
719
	unsigned long sb_bindex, id;
720
	int src_terminated = 0;
721

  
722
	offset += pagecpy;
723
	do {
724
		len -= pagecpy;
725
		if (!src_terminated)
726
			src += pagecpy;
727
		sbidx = offset >> chanb->subbuf_size_order;
728
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
729

  
730
		/*
731
		 * Underlying layer should never ask for writes across
732
		 * subbuffers.
733
		 */
734
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
735

  
736
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
737
		id = bufb->buf_wsb[sbidx].id;
738
		sb_bindex = subbuffer_id_get_index(config, id);
739
		rpages = bufb->array[sb_bindex];
740
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
741
				&& subbuffer_id_is_noref(config, id));
742

  
743
		if (likely(!src_terminated)) {
744
			size_t count, to_copy;
745

  
746
			to_copy = pagecpy;
747
			if (pagecpy == len)
748
				to_copy--;	/* Final '\0' */
749
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
750
					rpages->p[index].virt
751
						+ (offset & ~PAGE_MASK),
752
					src, to_copy);
753
			offset += count;
754
			/* Padding */
755
			if (unlikely(count < to_copy)) {
756
				size_t pad_len = to_copy - count;
757

  
758
				/* Next pages will have padding */
759
				src_terminated = 1;
760
				lib_ring_buffer_do_memset(rpages->p[index].virt
761
						+ (offset & ~PAGE_MASK),
762
					pad, pad_len);
763
				offset += pad_len;
764
			}
765
		} else {
766
			size_t pad_len;
767

  
768
			pad_len = pagecpy;
769
			if (pagecpy == len)
770
				pad_len--;	/* Final '\0' */
771
			lib_ring_buffer_do_memset(rpages->p[index].virt
772
					+ (offset & ~PAGE_MASK),
773
				pad, pad_len);
774
			offset += pad_len;
775
		}
776
	} while (unlikely(len != pagecpy));
777
	/* Ending '\0' */
778
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
779
			'\0', 1);
780
}
781
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
782

  
783
/**
618 784
 * lib_ring_buffer_read - read data from ring_buffer_buffer.
619 785
 * @bufb : buffer backend
620 786
 * @offset : offset within the buffer
lttng-events.h
236 236
				      const void *src, size_t len);
237 237
	void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
238 238
			     int c, size_t len);
239
	void (*event_strcpy)(struct lib_ring_buffer_ctx *ctx, const char *src,
240
			     size_t len);
241
	void (*event_strcpy_from_user)(struct lib_ring_buffer_ctx *ctx,
242
				       const char __user *src, size_t len);
239 243
	/*
240 244
	 * packet_avail_size returns the available size in the current
241 245
	 * packet. Note that the size returned is only a hint, since it
lttng-ring-buffer-client.h
629 629
}
630 630

  
631 631
static
632
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
633
		size_t len)
634
{
635
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
636
}
637

  
638
static
639
void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
640
		const char __user *src, size_t len)
641
{
642
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
643
			len, '#');
644
}
645

  
646
static
632 647
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
633 648
{
634 649
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
......
669 684
		.event_write = lttng_event_write,
670 685
		.event_write_from_user = lttng_event_write_from_user,
671 686
		.event_memset = lttng_event_memset,
687
		.event_strcpy = lttng_event_strcpy,
688
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
672 689
		.packet_avail_size = NULL,	/* Would be racy anyway */
673 690
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
674 691
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
lttng-ring-buffer-metadata-client.h
326 326
}
327 327

  
328 328
static
329
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
330
		size_t len)
331
{
332
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
333
}
334

  
335
static
329 336
size_t lttng_packet_avail_size(struct channel *chan)
330 337
			     
331 338
{
......
383 390
		.event_write_from_user = lttng_event_write_from_user,
384 391
		.event_memset = lttng_event_memset,
385 392
		.event_write = lttng_event_write,
393
		.event_strcpy = lttng_event_strcpy,
386 394
		.packet_avail_size = lttng_packet_avail_size,
387 395
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
388 396
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
probes/lttng-events.h
691 691
 */
692 692
#undef tp_copy_string_from_user
693 693
#define tp_copy_string_from_user(dest, src)				\
694
	__assign_##dest:						\
695
	{								\
696
		size_t __ustrlen;					\
697
									\
698
		if (0)							\
699
			(void) __typemap.dest;				\
700
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest));\
701
		__ustrlen = __get_dynamic_array_len(dest);		\
702
		if (likely(__ustrlen > 1)) {				\
703
			__chan->ops->event_write_from_user(&__ctx, src,	\
704
				__ustrlen - 1);				\
705
		}							\
706
		__chan->ops->event_memset(&__ctx, 0, 1);		\
707
	}								\
694
__assign_##dest:							\
695
	if (0)								\
696
		(void) __typemap.dest;					\
697
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
698
	__chan->ops->event_strcpy_from_user(&__ctx, src,		\
699
		__get_dynamic_array_len(dest));				\
708 700
	goto __end_field_##dest;
701

  
709 702
#undef tp_strcpy
/*
 * Copy a kernel-space string field via the channel's strcpy operation,
 * which handles truncation, '#' padding and '\0' termination.
 */
#define tp_strcpy(dest, src)						\
__assign_##dest:							\
	if (0)								\
		(void) __typemap.dest;					\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
	__chan->ops->event_strcpy(&__ctx, src, __get_dynamic_array_len(dest)); \
	goto __end_field_##dest;
712 710

  
713 711
/* Named field types must be defined in lttng-types.h */
714 712

  
(2-2/2)