This is the mail archive of the
gdb-patches@sources.redhat.com
mailing list for the GDB project.
[rfa/testsuite] 64-bit bigcore.exp fixes
- From: Andrew Cagney <ac131313 at redhat dot com>
- To: gdb-patches at sources dot redhat dot com
- Date: Fri, 24 Sep 2004 09:49:13 -0400
- Subject: [rfa/testsuite] 64-bit bigcore.exp fixes
Hello,
This patch to bigcore.exp addresses a few 64-bit problems:
- core file exceeding file system's file size limit
On a 64-bit system, the coredump can exceed the file-system's max
file-size leading to a truncated corefile (the ia64 rhel3 system I was
testing on maxed at 1x10^9 bytes / [us] 1 terabyte). This patch
modifies bigcore.c so that it sniffs out the file-size limit by probing
for how far a seek will go, capping things at that limit.
- use 64-bit off_t if available
i386 GNU/Linux has a 64-bit off_t (off64_t). This patch tries to use
that to compute the file-size limit, if it is available
- assume things work when dejagnu / tcl screws up the file size
This restores (and better comments) an edge case I accidentally changed
with my last patch - it modifies bigcore.exp so that when dejagnu/tcl
botch the file size computation it's assumed things will work.
Otherwise the test is incorrectly aborted on 32-bit systems, with a
broken dejagnu/tcl, that do support large corefiles.
Tested on i386, ia64 and amd64 rhel3 systems which all pass.
ok?
Andrew
Index: ./gdb/testsuite/ChangeLog
2004-09-24 Andrew Cagney <cagney@redhat.com>
* gdb.base/bigcore.exp (extract_heap): If the expect "file size"
command fails, assume things will work.
* gdb.base/bigcore.c: Include <sys/stat.h> and <fcntl.h>.
(_GNU_SOURCE): Define.
(print_unsigned, print_hex): Change parameter to "long long".
(print_byte_count): New function, use to print byte counts.
(large_off_t, large_lseek, O_LARGEFILE): Define dependent on
O_LARGEFILE.
(main): Compute an upper bound on a corefile in max_core_size.
Limit memory chunk size to max_core_size. Limit total memory
allocated to max_core_size.
Index: ./gdb/testsuite/gdb.base/bigcore.c
===================================================================
RCS file: /cvs/src/src/gdb/testsuite/gdb.base/bigcore.c,v
retrieving revision 1.4
diff -p -u -r1.4 bigcore.c
--- ./gdb/testsuite/gdb.base/bigcore.c 14 Aug 2004 20:17:43 -0000 1.4
+++ ./gdb/testsuite/gdb.base/bigcore.c 24 Sep 2004 13:31:04 -0000
@@ -19,9 +19,14 @@
Please email any bugs, comments, and/or additions to this file to:
bug-gdb@prep.ai.mit.edu */
+/* Get 64-bit stuff if on a GNU system. */
+#define _GNU_SOURCE
+
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
@@ -43,7 +48,7 @@ print_char (char c)
}
static void
-print_unsigned (unsigned long u)
+print_unsigned (unsigned long long u)
{
if (u >= 10)
print_unsigned (u / 10);
@@ -51,7 +56,7 @@ print_unsigned (unsigned long u)
}
static void
-print_hex (unsigned long u)
+print_hex (unsigned long long u)
{
if (u >= 16)
print_hex (u / 16);
@@ -72,6 +77,16 @@ print_address (const void *a)
print_hex ((unsigned long) a);
}
+static void
+print_byte_count (unsigned long long u)
+{
+ print_unsigned (u);
+ print_string (" (");
+ print_string ("0x");
+ print_hex (u);
+ print_string (") bytes");
+}
+
/* Print the current values of RESOURCE. */
static void
@@ -122,10 +137,20 @@ static struct list heap = { &dummy, &dum
static unsigned long bytes_allocated;
+#ifdef O_LARGEFILE
+#define large_off_t off64_t
+#define large_lseek lseek64
+#else
+#define large_off_t off_t
+#define O_LARGEFILE 0
+#define large_lseek lseek
+#endif
+
int
main ()
{
size_t max_chunk_size;
+ large_off_t max_core_size;
/* Try to expand all the resource limits beyond the point of sanity
- we're after the biggest possible core file. */
@@ -144,14 +169,46 @@ main ()
maximize_rlimit (RLIMIT_AS, "stack");
#endif
+ print_string ("Maximize allocation limits ...\n");
+
+ /* Compute the largest possible corefile size. No point in trying
+ to create a corefile larger than the largest file supported by
+ the file system. What about 64-bit lseek64? */
+ {
+ int fd;
+ large_off_t tmp;
+ unlink ("bigcore.corefile");
+ fd = open ("bigcore.corefile", O_RDWR | O_CREAT | O_TRUNC | O_LARGEFILE);
+ for (tmp = 1; tmp > 0; tmp <<= 1)
+ {
+ if (large_lseek (fd, tmp, SEEK_SET) > 0)
+ max_core_size = tmp;
+ }
+ close (fd);
+ }
+
/* Compute an initial chunk size. The math is dodgy but it works
- for the moment. Perhaphs there's a constant around somewhere. */
+ for the moment. Perhaphs there's a constant around somewhere.
+ Limit this to max_core_size bytes - no point in trying to
+ allocate more than can be written to the corefile. */
{
size_t tmp;
- for (tmp = 1; tmp > 0; tmp <<= 1)
+ for (tmp = 1; tmp > 0 && tmp < max_core_size; tmp <<= 1)
max_chunk_size = tmp;
}
+ print_string (" core: ");
+ print_byte_count (max_core_size);
+ print_string ("\n");
+ print_string (" chunk: ");
+ print_byte_count (max_chunk_size);
+ print_string ("\n");
+ print_string (" large? ");
+ if (O_LARGEFILE)
+ print_string ("yes\n");
+ else
+ print_string ("no\n");
+
/* Allocate as much memory as possible creating a linked list of
each section. The linking ensures that some, but not all, the
memory is allocated. NB: Some kernels handle this efficiently -
@@ -173,9 +230,10 @@ main ()
{
unsigned long count = 0;
print_string (" ");
- print_unsigned (chunk_size);
- print_string (" bytes ... ");
- while (1)
+ print_byte_count (chunk_size);
+ print_string (" ... ");
+ while (bytes_allocated + (1 + count) * chunk_size
+ < max_core_size)
{
struct list *chunk = malloc (chunk_size);
if (chunk == NULL)
@@ -194,7 +252,7 @@ main ()
bytes_allocated += chunk_size * count;
}
print_string ("Total of ");
- print_unsigned (bytes_allocated);
+ print_byte_count (bytes_allocated);
print_string (" bytes ");
print_unsigned (chunks_allocated);
print_string (" chunks\n");
Index: ./gdb/testsuite/gdb.base/bigcore.exp
===================================================================
RCS file: /cvs/src/src/gdb/testsuite/gdb.base/bigcore.exp,v
retrieving revision 1.9
diff -p -u -r1.9 bigcore.exp
--- ./gdb/testsuite/gdb.base/bigcore.exp 23 Sep 2004 20:48:04 -0000 1.9
+++ ./gdb/testsuite/gdb.base/bigcore.exp 24 Sep 2004 13:31:04 -0000
@@ -169,16 +169,24 @@ if { $file == "" } {
# necessarily the "good" one. And we must use GDB for the comparison,
# similarly.
-set core_ok 0
if {[catch {file size $corefile} core_size] == 0} {
+ set core_ok 0
gdb_test_multiple "print bytes_allocated < $core_size" "check core size" {
-re " = 1\r\n$gdb_prompt $" {
pass "check core size"
set core_ok 1
}
}
+} {
+ # Probably failed due to the TCL build having problems with very
+ # large values. Since GDB uses a 64-bit off_t (when possible) it
+ # shouldn't have this problem. Assume that things are going to
+ # work. Without this assumption the test is skiped on systems
+ # (such as i386 GNU/Linux with patched kernel) which do pass.
+ pass "check core size"
+ set core_ok 1
}
-if {$core_ok == 0} {
+if {! $core_ok} {
untested "check core size (system does not support large corefiles)"
return 0
}