* [patch] gcc/*: Fix comment typos.
@ 2007-07-29 0:52 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-07-29 0:52 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-07-28 Kazu Hirata <kazu@codesourcery.com>
* cfglayout.c, config/arm/arm.c, config/arm/cortex-a8.md,
config/arm/neon-schedgen.ml, config/arm/neon.ml,
config/arm/vec-common.md, config/ia64/div.md, cselib.c,
df-core.c, df.h, dominance.c, optabs.c, opts.c, reg-stack.c,
regstat.c, target.h, tree-ssa-live.c, tree-ssa-pre.c,
tree-vect-transform.c, tree.def: Fix comment typos. Follow
spelling conventions.
* doc/invoke.texi: Follow spelling conventions.
Index: cfglayout.c
===================================================================
--- cfglayout.c (revision 127028)
+++ cfglayout.c (working copy)
@@ -1001,7 +1001,7 @@ force_one_exit_fallthru (void)
redirect_edge_and_branch_force (e, forwarder);
}
- /* Fix up the chain of blocks -- make FORWARDER immediately preceed the
+ /* Fix up the chain of blocks -- make FORWARDER immediately precede the
exit block. */
FOR_EACH_BB (bb)
{
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 127028)
+++ config/arm/arm.c (working copy)
@@ -6083,7 +6083,7 @@ neon_valid_immediate (rtx op, enum machi
{
unsigned HOST_WIDE_INT imm = 0;
- /* Un-invert bytes of recognized vector, if neccessary. */
+ /* Un-invert bytes of recognized vector, if necessary. */
if (invmask != 0)
for (i = 0; i < idx; i++)
bytes[i] ^= invmask;
@@ -6212,7 +6212,7 @@ neon_pairwise_reduce (rtx op0, rtx op1,
}
}
-/* Initialise a vector with non-constant elements. FIXME: We can do better
+/* Initialize a vector with non-constant elements. FIXME: We can do better
than the current implementation (building a vector on the stack and then
loading it) in many cases. See rs6000.c. */
@@ -12664,7 +12664,7 @@ arm_print_operand (FILE *stream, rtx x,
break;
/* %# is a "break" sequence. It doesn't output anything, but is used to
- seperate e.g. operand numbers from following text, if that text consists
+ separate e.g. operand numbers from following text, if that text consists
of further digits which we don't want to be part of the operand
number. */
case '#':
Index: config/arm/cortex-a8.md
===================================================================
--- config/arm/cortex-a8.md (revision 127028)
+++ config/arm/cortex-a8.md (working copy)
@@ -129,7 +129,7 @@ (define_bypass 1 "cortex_a8_alu,cortex_a
"arm_no_early_alu_shift_value_dep")
;; Multiplication instructions. These are categorized according to their
-;; reservation behaviour and the need below to distinguish certain
+;; reservation behavior and the need below to distinguish certain
;; varieties for bypasses. Results are available at the E5 stage
;; (but some of these are multi-cycle instructions which explains the
;; latencies below).
@@ -245,7 +245,7 @@ (define_insn_reservation "cortex_a8_stor
;; reads the value to be stored at the start of E3 and the ALU insn
;; writes it at the end of E2. Move instructions actually produce the
;; result at the end of E1, but since we don't have delay slots, the
-;; scheduling behaviour will be the same.
+;; scheduling behavior will be the same.
(define_bypass 0 "cortex_a8_alu,cortex_a8_alu_shift,\
cortex_a8_alu_shift_reg,cortex_a8_mov"
"cortex_a8_store1_2,cortex_a8_store3_4"
Index: config/arm/neon-schedgen.ml
===================================================================
--- config/arm/neon-schedgen.ml (revision 127028)
+++ config/arm/neon-schedgen.ml (working copy)
@@ -63,7 +63,7 @@ type availability = Source of int
type guard = Guard_none | Guard_only_m | Guard_only_n | Guard_only_d
-(* Reservation behaviours. All but the last row here correspond to one
+(* Reservation behaviors. All but the last row here correspond to one
pipeline each. Each constructor will correspond to one
define_reservation. *)
type reservation =
@@ -78,7 +78,7 @@ type reservation =
| Fmul_then_fadd | Fmul_then_fadd_2
(* This table must be kept as short as possible by conflating
- entries with the same availability behaviour.
+ entries with the same availability behavior.
First components: instruction group names
Second components: availability requirements, in the order in which
Index: config/arm/neon.ml
===================================================================
--- config/arm/neon.ml (revision 127028)
+++ config/arm/neon.ml (working copy)
@@ -177,7 +177,7 @@ type opcode =
(* Set/extract lanes from a vector. *)
| Vget_lane
| Vset_lane
- (* Initialise vector from bit pattern. *)
+ (* Initialize vector from bit pattern. *)
| Vcreate
(* Set all lanes to same value. *)
| Vdup_n
@@ -227,7 +227,7 @@ type features =
names. *)
| Instruction_name of string list
(* Mark that the intrinsic yields no instructions, or expands to yield
- behaviour that the test generator cannot test. *)
+ behavior that the test generator cannot test. *)
| No_op
(* Mark that the intrinsic has constant arguments that cannot be set
to the defaults (zero for pointers and one otherwise) in the test
Index: config/arm/vec-common.md
===================================================================
--- config/arm/vec-common.md (revision 127028)
+++ config/arm/vec-common.md (working copy)
@@ -42,7 +42,7 @@ (define_expand "mov<mode>"
})
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
-;; patterns seperately for IWMMXT and Neon.
+;; patterns separately for IWMMXT and Neon.
(define_expand "add<mode>3"
[(set (match_operand:VALL 0 "s_register_operand" "")
Index: config/ia64/div.md
===================================================================
--- config/ia64/div.md (revision 127028)
+++ config/ia64/div.md (working copy)
@@ -195,7 +195,7 @@ (define_split
operands[2] = gen_rtx_REG (<MODE>mode, REGNO (operands[1]));
})
-;; Reciprical approximation
+;; Reciprocal approximation
(define_insn "recip_approx_rf"
[(set (match_operand:RF 0 "fr_register_operand" "=f")
Index: cselib.c
===================================================================
--- cselib.c (revision 127028)
+++ cselib.c (working copy)
@@ -953,7 +953,7 @@ cselib_expand_value_rtx (rtx orig, bitma
STACK_POINTER_REGNUM, FRAME_POINTER or the
HARD_FRAME_POINTER.
- Thses expansions confuses the code that notices that
+ These expansions confuse the code that notices that
stores into the frame go dead at the end of the
function and that the frame is not effected by calls
to subroutines. If you allow the
Index: df-core.c
===================================================================
--- df-core.c (revision 127028)
+++ df-core.c (working copy)
@@ -144,7 +144,7 @@ There are four ways of doing the increme
For most modern rtl passes, this is certainly the easiest way to
manage rescanning the insns. This technique also has the advantage
that the scanning information is always correct and can be relied
- apon even after changes have been made to the instructions. This
+ upon even after changes have been made to the instructions. This
technique is contra indicated in several cases:
a) If def-use chains OR use-def chains (but not both) are built,
Index: df.h
===================================================================
--- df.h (revision 127028)
+++ df.h (working copy)
@@ -311,7 +311,7 @@ struct dataflow
struct df_mw_hardreg
{
rtx mw_reg; /* The multiword hardreg. */
- /* These two bitfields are intentially oversized, in the hope that
+ /* These two bitfields are intentionally oversized, in the hope that
accesses to 16-bit fields will usually be quicker. */
ENUM_BITFIELD(df_ref_type) type : 16;
/* Used to see if the ref is read or write. */
@@ -360,7 +360,7 @@ struct df_ref
unsigned int ref_order;
unsigned int regno; /* The register number referenced. */
- /* These two bitfields are intentially oversized, in the hope that
+ /* These two bitfields are intentionally oversized, in the hope that
accesses to 16-bit fields will usually be quicker. */
ENUM_BITFIELD(df_ref_type) type : 16;
/* Type of ref. */
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 127028)
+++ doc/invoke.texi (working copy)
@@ -1858,7 +1858,7 @@ been permitted when this option was not
In new code it is better to use @option{-fvisibility=hidden} and
export those classes which are intended to be externally visible.
Unfortunately it is possible for code to rely, perhaps accidentally,
-on the Visual Studio behaviour.
+on the Visual Studio behavior.
Among the consequences of these changes are that static data members
of the same type with the same name but defined in different shared
Index: dominance.c
===================================================================
--- dominance.c (revision 127028)
+++ dominance.c (working copy)
@@ -1448,7 +1448,7 @@ debug_dominance_info (enum cdi_direction
}
/* Prints to stderr representation of the dominance tree (for direction DIR)
- rooted in ROOT, indented by INDENT tabelators. If INDENT_FIRST is false,
+ rooted in ROOT, indented by INDENT tabulators. If INDENT_FIRST is false,
the first line of the output is not indented. */
static void
Index: optabs.c
===================================================================
--- optabs.c (revision 127028)
+++ optabs.c (working copy)
@@ -4071,7 +4071,7 @@ emit_cmp_and_jump_insns (rtx x, rtx y, e
{
/* If we're not emitting a branch, callers are required to pass
operands in an order conforming to canonical RTL. We relax this
- for commutative comparsions so callers using EQ don't need to do
+ for commutative comparisons so callers using EQ don't need to do
swapping by hand. */
gcc_assert (label || (comparison == swap_condition (comparison)));
Index: opts.c
===================================================================
--- opts.c (revision 127028)
+++ opts.c (working copy)
@@ -91,7 +91,7 @@ enum debug_info_level debug_info_level =
generated in the object file of the corresponding source file.
Both of these case are handled when the base name of the file of
the struct definition matches the base name of the source file
- of thet current compilation unit. This matching emits minimal
+ of the current compilation unit. This matching emits minimal
struct debugging information.
The base file name matching rule above will fail to emit debug
Index: reg-stack.c
===================================================================
--- reg-stack.c (revision 127028)
+++ reg-stack.c (working copy)
@@ -1355,9 +1355,9 @@ subst_stack_regs_pat (rtx insn, stack re
}
/* Uninitialized USE might happen for functions returning uninitialized
value. We will properly initialize the USE on the edge to EXIT_BLOCK,
- so it is safe to ignore the use here. This is consistent with behaviour
+ so it is safe to ignore the use here. This is consistent with behavior
of dataflow analyzer that ignores USE too. (This also implies that
- forcingly initializing the register to NaN here would lead to ICE later,
+ forcibly initializing the register to NaN here would lead to ICE later,
since the REG_DEAD notes are not issued.) */
break;
Index: regstat.c
===================================================================
--- regstat.c (revision 127028)
+++ regstat.c (working copy)
@@ -395,7 +395,7 @@ regstat_get_setjmp_crosses (void)
Process REG_N_CALLS_CROSSED.
This is used by sched_deps. A good implementation of sched-deps
- would really process the blocks directly rather than going thur
+ would really process the blocks directly rather than going through
lists of insns. If it did this, it could use the exact regs that
cross an individual call rather than using this info that merges
the info for all calls.
Index: target.h
===================================================================
--- target.h (revision 127028)
+++ target.h (working copy)
@@ -420,7 +420,7 @@ struct gcc_target
int (*builtin_vectorization_cost) (bool);
/* Return true if vector alignment is reachable (by peeling N
- interations) for the given type. */
+ iterations) for the given type. */
bool (* vector_alignment_reachable) (tree, bool);
} vectorize;
Index: tree-ssa-live.c
===================================================================
--- tree-ssa-live.c (revision 127028)
+++ tree-ssa-live.c (working copy)
@@ -461,7 +461,7 @@ mark_scope_block_unused (tree scope)
or there is precisely one subblocks and the block
has same abstract origin as outer block and declares
no variables, so it is pure wrapper.
- When we are not outputting full debug info, we also elliminate dead variables
+ When we are not outputting full debug info, we also eliminate dead variables
out of scope blocks to let them to be recycled by GGC and to save copying work
done by the inliner. */
Index: tree-ssa-pre.c
===================================================================
--- tree-ssa-pre.c (revision 127028)
+++ tree-ssa-pre.c (working copy)
@@ -1431,7 +1431,7 @@ bitmap_find_leader (bitmap_set_t set, tr
return NULL;
}
-/* Determine if EXPR, a memory expressionn, is ANTIC_IN at the top of
+/* Determine if EXPR, a memory expression, is ANTIC_IN at the top of
BLOCK by seeing if it is not killed in the block. Note that we are
only determining whether there is a store that kills it. Because
of the order in which clean iterates over values, we are guaranteed
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 127028)
+++ tree-vect-transform.c (working copy)
@@ -265,7 +265,7 @@ vect_estimate_min_profitable_iters (loop
/* If the number of iterations is unknown, or the
peeling-for-misalignment amount is unknown, we will have to generate
- a runtime test to test the loop count agains the threshold. */
+ a runtime test to test the loop count against the threshold. */
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
|| (byte_misalign < 0))
runtime_test = true;
Index: tree.def
===================================================================
--- tree.def (revision 127028)
+++ tree.def (working copy)
@@ -884,7 +884,7 @@ DEFTREECODE (EH_FILTER_EXPR, "eh_filter_
has no value and generates no executable code. It is only used for
type based alias analysis. This is generated by C++ placement new.
CHANGE_DYNAMIC_TYPE_NEW_TYPE, the first operand, is the new type.
- CHNAGE_DYNAMIC_TYPE_LOCATION, the second operand, is the location
+ CHANGE_DYNAMIC_TYPE_LOCATION, the second operand, is the location
whose type is being changed. */
DEFTREECODE (CHANGE_DYNAMIC_TYPE_EXPR, "change_dynamic_type_expr",
tcc_statement, 2)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-10-14 2:16 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-10-14 2:16 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-10-14 Kazu Hirata <kazu@codesourcery.com>
* config/fixed-bit.c, config/i386/cpuid.h, config/i386/i386.c,
config/i386/i386.md, config/i386/sse.md, function.c, jump.c,
modulo-sched.c, ra-conflict.c, toplev.c, tree-eh.c, tree-sra.c,
tree-ssa-dse.c, tree-vect-analyze.c, tree-vect-patterns.c,
tree-vect-transform.c: Fix comment typos.
* doc/extend.texi: Fix a typo.
Index: config/fixed-bit.c
===================================================================
--- config/fixed-bit.c (revision 129290)
+++ config/fixed-bit.c (working copy)
@@ -465,7 +465,7 @@ FIXED_DIVHELPER (FIXED_C_TYPE a, FIXED_C
r = pos_a >> (FIXED_WIDTH - FBITS);
#endif
- /* Unsigned divide r by pos_b to quo_r. The remanider is in mod. */
+ /* Unsigned divide r by pos_b to quo_r. The remainder is in mod. */
quo_r = (UINT_C_TYPE)r / (UINT_C_TYPE)pos_b;
mod = (UINT_C_TYPE)r % (UINT_C_TYPE)pos_b;
quo_s = 0;
Index: config/i386/cpuid.h
===================================================================
--- config/i386/cpuid.h (revision 129290)
+++ config/i386/cpuid.h (working copy)
@@ -117,7 +117,7 @@ __get_cpuid_max (unsigned int __ext, uns
/* Return cpuid data for requested cpuid level, as found in returned
eax, ebx, ecx and edx registers. The function checks if cpuid is
supported and returns 1 for valid cpuid information or 0 for
- unsupported cpuid level. All pointers are requred to be non-null. */
+ unsupported cpuid level. All pointers are required to be non-null. */
static __inline int
__get_cpuid (unsigned int __level,
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 129290)
+++ config/i386/i386.c (working copy)
@@ -1429,7 +1429,7 @@ unsigned int ix86_tune_features[X86_TUNE
replacement is long decoded, so this split helps here as well. */
m_K6,
- /* X86_TUNE_USE_VECTOR_CONVERTS: Preffer vector packed SSE conversion
+ /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
from integer to FP. */
m_AMDFAM10,
};
@@ -13442,8 +13442,8 @@ ix86_expand_sse4_unpack (rtx operands[2]
#define PPERM_REV_INV 0x60 /* bit reverse & invert src */
#define PPERM_ZERO 0x80 /* all 0's */
#define PPERM_ONES 0xa0 /* all 1's */
-#define PPERM_SIGN 0xc0 /* propigate sign bit */
-#define PPERM_INV_SIGN 0xe0 /* invert & propigate sign */
+#define PPERM_SIGN 0xc0 /* propagate sign bit */
+#define PPERM_INV_SIGN 0xe0 /* invert & propagate sign */
#define PPERM_SRC1 0x00 /* use first source byte */
#define PPERM_SRC2 0x10 /* use second source byte */
@@ -24879,7 +24879,7 @@ ix86_expand_round (rtx operand0, rtx ope
/* Validate whether a SSE5 instruction is valid or not.
OPERANDS is the array of operands.
NUM is the number of operands.
- USES_OC0 is true if the instruction uses OC0 and provides 4 varients.
+ USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
NUM_MEMORY is the maximum number of memory operands to accept. */
bool ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int num_memory)
{
@@ -24960,7 +24960,7 @@ bool ix86_sse5_valid_op_p (rtx operands[
else if (num == 4 && num_memory == 2)
{
/* If there are two memory operations, we can load one of the memory ops
- into the destination register. This is for optimizating the
+ into the destination register. This is for optimizing the
multiply/add ops, which the combiner has optimized both the multiply
and the add insns to have a memory operation. We have to be careful
that the destination doesn't overlap with the inputs. */
Index: config/i386/i386.md
===================================================================
--- config/i386/i386.md (revision 129290)
+++ config/i386/i386.md (working copy)
@@ -207,7 +207,7 @@ (define_constants
(UNSPECV_PROLOGUE_USE 14)
])
-;; Constants to represent pcomtrue/pcomfalse varients
+;; Constants to represent pcomtrue/pcomfalse variants
(define_constants
[(PCOM_FALSE 0)
(PCOM_TRUE 1)
@@ -4840,7 +4840,7 @@ (define_expand "floatsi<mode>2"
}
/* Offload operand of cvtsi2ss and cvtsi2sd into memory for
!TARGET_INTER_UNIT_CONVERSIONS
- It is neccesary for the patterns to not accept nonemmory operands
+ It is necessary for the patterns to not accept nonmemory operands
as we would optimize out later. */
else if (!TARGET_INTER_UNIT_CONVERSIONS
&& TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))
Index: config/i386/sse.md
===================================================================
--- config/i386/sse.md (revision 129290)
+++ config/i386/sse.md (working copy)
@@ -7749,7 +7749,7 @@ (define_insn "sse5_pmacsdqh"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; SSE5 parallel integer mutliply/add instructions for the intrinisics
+;; SSE5 parallel integer multiply/add instructions for the intrinsics
(define_insn "sse5_pmacsswd"
[(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(ss_plus:V4SI
Index: doc/extend.texi
===================================================================
--- doc/extend.texi (revision 129290)
+++ doc/extend.texi (working copy)
@@ -8143,7 +8143,7 @@ v2di __builtin_ia32_pshlq (v2di, v2di)
v8hi __builtin_ia32_pshlw (v8hi, v8hi)
@end smallexample
-The following builtin-in functions are avaialble when @option{-msse5}
+The following built-in functions are available when @option{-msse5}
is used. The second argument must be an integer constant and generate
the machine instruction that is part of the name with the @samp{_imm}
suffix removed.
Index: function.c
===================================================================
--- function.c (revision 129290)
+++ function.c (working copy)
@@ -5702,7 +5702,7 @@ match_asm_constraints_1 (rtx insn, rtx *
asm ("" : "=r" (output), "=m" (input) : "0" (input))
- Here 'input' is used in two occurences as input (once for the
+ Here 'input' is used in two occurrences as input (once for the
input operand, once for the address in the second output operand).
If we would replace only the occurrence of the input operand (to
make the matching) we would be left with this:
@@ -5714,7 +5714,7 @@ match_asm_constraints_1 (rtx insn, rtx *
value, but different pseudos) where we formerly had only one.
With more complicated asms this might lead to reload failures
which wouldn't have happen without this pass. So, iterate over
- all operands and replace all occurences of the register used. */
+ all operands and replace all occurrences of the register used. */
for (j = 0; j < noutputs; j++)
if (!rtx_equal_p (SET_DEST (p_sets[j]), input)
&& reg_overlap_mentioned_p (input, SET_DEST (p_sets[j])))
Index: jump.c
===================================================================
--- jump.c (revision 129290)
+++ jump.c (working copy)
@@ -975,7 +975,7 @@ mark_jump_label (rtx x, rtx insn, int in
(insn != NULL && x == PATTERN (insn) && JUMP_P (insn)));
}
-/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurrs
+/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurs
within a (MEM ...). IS_TARGET is TRUE when X is to be treated as a
jump-target; when the JUMP_LABEL field of INSN should be set or a
REG_LABEL_TARGET note should be added, not a REG_LABEL_OPERAND
Index: modulo-sched.c
===================================================================
--- modulo-sched.c (revision 129290)
+++ modulo-sched.c (working copy)
@@ -1760,7 +1760,7 @@ ps_insert_empty_row (partial_schedule_pt
/* Given U_NODE which is the node that failed to be scheduled; LOW and
UP which are the boundaries of it's scheduling window; compute using
- SCHED_NODES and II a row in the partial schedule that can be splitted
+ SCHED_NODES and II a row in the partial schedule that can be split
which will separate a critical predecessor from a critical successor
thereby expanding the window, and return it. */
static int
Index: ra-conflict.c
===================================================================
--- ra-conflict.c (revision 129290)
+++ ra-conflict.c (working copy)
@@ -1086,7 +1086,7 @@ global_conflicts (void)
}
/* Early clobbers, by definition, need to not only
- clobber the registers that are live accross the insn
+ clobber the registers that are live across the insn
but need to clobber the registers that die within the
insn. The clobbering for registers live across the
insn is handled above. */
Index: toplev.c
===================================================================
--- toplev.c (revision 129290)
+++ toplev.c (working copy)
@@ -2152,7 +2152,7 @@ lang_dependent_init (const char *name)
void
target_reinit (void)
{
- /* Reinitialise RTL backend. */
+ /* Reinitialize RTL backend. */
backend_init_target ();
/* Reinitialize lang-dependent parts. */
Index: tree-eh.c
===================================================================
--- tree-eh.c (revision 129290)
+++ tree-eh.c (working copy)
@@ -2173,7 +2173,7 @@ optimize_double_finally (tree one, tree
}
/* Perform EH refactoring optimizations that are simpler to do when code
- flow has been lowered but EH structurs haven't. */
+ flow has been lowered but EH structures haven't. */
static void
refactor_eh_r (tree t)
Index: tree-sra.c
===================================================================
--- tree-sra.c (revision 129290)
+++ tree-sra.c (working copy)
@@ -2876,7 +2876,7 @@ struct bitfield_overlap_info
};
/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
- expression (refereced as BF below) accesses any of the bits in FLD,
+ expression (referenced as BF below) accesses any of the bits in FLD,
false if it doesn't. If DATA is non-null, its field_len and
field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
field_len, field_pos> (referenced as BFLD below) represents the
Index: tree-ssa-dse.c
===================================================================
--- tree-ssa-dse.c (revision 129290)
+++ tree-ssa-dse.c (working copy)
@@ -653,7 +653,7 @@ execute_simple_dse (void)
bitmap_ior_into (variables_loaded,
LOADED_SYMS (bsi_stmt (bsi)));
- /* Look for statements writting into the write only variables.
+ /* Look for statements writing into the write only variables.
And try to remove them. */
FOR_EACH_BB (bb)
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 129290)
+++ tree-vect-analyze.c (working copy)
@@ -2279,7 +2279,7 @@ vect_analyze_group_access (struct data_r
/* Analyze the access pattern of the data-reference DR.
- In case of non-consecutive accesse call vect_analyze_group_access() to
+ In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of strided accesses. */
static bool
Index: tree-vect-patterns.c
===================================================================
--- tree-vect-patterns.c (revision 129290)
+++ tree-vect-patterns.c (working copy)
@@ -545,7 +545,7 @@ vect_recog_pow_pattern (tree last_stmt,
stmts that constitute the pattern. In this case it will be:
WIDEN_SUM <x_t, sum_0>
- Note: The widneing-sum idiom is a widening reduction pattern that is
+ Note: The widening-sum idiom is a widening reduction pattern that is
vectorized without preserving all the intermediate results. It
produces only N/2 (widened) results (by summing up pairs of
intermediate results) rather than all N results. Therefore, we
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 129290)
+++ tree-vect-transform.c (working copy)
@@ -1381,7 +1381,7 @@ vect_get_constant_vectors (slp_tree slp_
}
-/* Get vectorized defintions from SLP_NODE that contains corresponding
+/* Get vectorized definitions from SLP_NODE that contains corresponding
vectorized def-stmts. */
static void
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-09-01 20:16 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-09-01 20:16 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-09-01 Kazu Hirata <kazu@codesourcery.com>
* config/arm/arm.c, config/rs6000/ppu_intrinsics.h,
config/spu/spu.c, df-scan.c, fixed-value.c, fold-const.c,
ginclude/tgmath.h, haifa-sched.c, optabs.c, recog.c,
sched-deps.c, sched-int.h, system.h, target.h,
tree-ssa-live.c, tree-vect-transform.c, tree-vectorizer.c,
tree.def: Fix comment typos.
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 128014)
+++ config/arm/arm.c (working copy)
@@ -5868,7 +5868,7 @@ vfp3_const_double_index (rtx x)
return -1;
/* Sign, mantissa and exponent are now in the correct form to plug into the
- formulae described in the comment above. */
+ formula described in the comment above. */
return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
}
Index: config/rs6000/ppu_intrinsics.h
===================================================================
--- config/rs6000/ppu_intrinsics.h (revision 128014)
+++ config/rs6000/ppu_intrinsics.h (working copy)
@@ -188,7 +188,7 @@ typedef int __V4SI __attribute__((vector
#endif /* __powerpc64__ */
#ifdef __powerpc64__
-/* Work around the hadware bug in the current Cell implemention. */
+/* Work around the hardware bug in the current Cell implementation. */
#define __mftb() __extension__ \
({ unsigned long long result; \
__asm__ volatile ("1: mftb %[current_tb]\n" \
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 128014)
+++ config/spu/spu.c (working copy)
@@ -5484,7 +5484,7 @@ spu_builtin_vectorization_cost (bool run
/* If the branch of the runtime test is taken - i.e. - the vectorized
version is skipped - this incurs a misprediction cost (because the
vectorized version is expected to be the fall-through). So we subtract
- the latency of a mispredicted branch from the costs that are incured
+ the latency of a mispredicted branch from the costs that are incurred
when the vectorized version is executed. */
if (runtime_test)
return -19;
Index: df-scan.c
===================================================================
--- df-scan.c (revision 128014)
+++ df-scan.c (working copy)
@@ -3632,7 +3632,7 @@ df_record_entry_block_defs (bitmap entry
}
-/* Update the defs in the entry bolck. */
+/* Update the defs in the entry block. */
void
df_update_entry_block_defs (void)
Index: fixed-value.c
===================================================================
--- fixed-value.c (revision 128014)
+++ fixed-value.c (working copy)
@@ -553,7 +553,7 @@ do_fixed_divide (FIXED_VALUE_TYPE *f, co
&r.low, &r.high, 0);
}
- /* Divide r by pos_b to quo_r. The remanider is in mod. */
+ /* Divide r by pos_b to quo_r. The remainder is in mod. */
div_and_round_double (TRUNC_DIV_EXPR, 1, r.low, r.high, pos_b.low,
pos_b.high, &quo_r.low, &quo_r.high, &mod.low,
&mod.high);
@@ -613,7 +613,7 @@ do_fixed_divide (FIXED_VALUE_TYPE *f, co
return overflow_p;
}
-/* Calculate F = A << B if LEFT_P. Otherwies, F = A >> B.
+/* Calculate F = A << B if LEFT_P. Otherwise, F = A >> B.
If SAT_P, saturate the result to the max or the min.
Return true, if !SAT_P and overflow. */
Index: fold-const.c
===================================================================
--- fold-const.c (revision 128014)
+++ fold-const.c (working copy)
@@ -3537,7 +3537,7 @@ omit_one_operand (tree type, tree result
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
@@ -3555,7 +3555,7 @@ pedantic_omit_one_operand (tree type, tr
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
Index: ginclude/tgmath.h
===================================================================
--- ginclude/tgmath.h (revision 128014)
+++ ginclude/tgmath.h (working copy)
@@ -50,7 +50,7 @@ Boston, MA 02111-1307, USA. */
If any generic parameter is complex, we use a complex version. Otherwise
we use a real version. If the real part of any generic parameter is long
double, we use the long double version. Otherwise if the real part of any
- generic paramter is double or of integer type, we use the double version.
+ generic parameter is double or of integer type, we use the double version.
Otherwise we use the float version. */
#define __tg_cplx(expr) \
Index: haifa-sched.c
===================================================================
--- haifa-sched.c (revision 128014)
+++ haifa-sched.c (working copy)
@@ -2924,7 +2924,7 @@ try_ready (rtx next)
else
{
/* One of the NEXT's dependencies has been resolved.
- Recalcute NEXT's status. */
+ Recalculate NEXT's status. */
*ts &= ~SPECULATIVE & ~HARD_DEP;
@@ -3857,7 +3857,7 @@ create_check_block_twin (rtx insn, bool
DONE_SPEC (insn) = ts & BEGIN_SPEC;
CHECK_SPEC (check) = ts & BEGIN_SPEC;
- /* Luckyness of future speculations solely depends upon initial
+ /* Luckiness of future speculations solely depends upon initial
BEGIN speculation. */
if (ts & BEGIN_DATA)
fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
Index: optabs.c
===================================================================
--- optabs.c (revision 128014)
+++ optabs.c (working copy)
@@ -1265,7 +1265,7 @@ shift_optab_p (optab binoptab)
}
}
-/* Return true if BINOPTAB implements a commutatative binary operation. */
+/* Return true if BINOPTAB implements a commutative binary operation. */
static bool
commutative_optab_p (optab binoptab)
Index: recog.c
===================================================================
--- recog.c (revision 128014)
+++ recog.c (working copy)
@@ -443,7 +443,7 @@ confirm_change_group (void)
if (changes[i].unshare)
*changes[i].loc = copy_rtx (*changes[i].loc);
- /* Avoid unnecesary rescaning when multiple changes to same instruction
+ /* Avoid unnecessary rescanning when multiple changes to same instruction
are made. */
if (object)
{
Index: sched-deps.c
===================================================================
--- sched-deps.c (revision 128014)
+++ sched-deps.c (working copy)
@@ -359,7 +359,7 @@ free_deps_list (deps_list_t l)
}
/* Return true if there is no dep_nodes and deps_lists out there.
- After the region is scheduled all the depedency nodes and lists
+ After the region is scheduled all the dependency nodes and lists
should [generally] be returned to pool. */
bool
deps_pools_are_empty_p (void)
@@ -648,7 +648,7 @@ sd_finish_insn (rtx insn)
/* Find a dependency between producer PRO and consumer CON.
Search through resolved dependency lists if RESOLVED_P is true.
If no such dependency is found return NULL,
- overwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
+ otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
with an iterator pointing to it. */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
Index: sched-int.h
===================================================================
--- sched-int.h (revision 128014)
+++ sched-int.h (working copy)
@@ -897,7 +897,7 @@ struct _sd_iterator
simply a pointer to the next element to allow easy deletion from the
list. When a dep is being removed from the list the iterator
will automatically advance because the value in *linkp will start
- reffering to the next element. */
+ referring to the next element. */
dep_link_t *linkp;
/* True if the current list is a resolved one. */
Index: system.h
===================================================================
--- system.h (revision 128014)
+++ system.h (working copy)
@@ -789,7 +789,7 @@ extern void fancy_abort (const char *, i
#define CONST_CAST(X) ((void*)(X))
#endif
-/* Acivate -Wcast-qual as a warning (not an error/-Werror). */
+/* Activate -Wcast-qual as a warning (not an error/-Werror). */
#if GCC_VERSION >= 4003
#pragma GCC diagnostic warning "-Wcast-qual"
#endif
Index: target.h
===================================================================
--- target.h (revision 128014)
+++ target.h (working copy)
@@ -306,7 +306,7 @@ struct gcc_target
/* The values of the following two members are pointers to
functions used to simplify the automaton descriptions.
dfa_pre_advance_cycle and dfa_post_advance_cycle are getting called
- immediatelly before and after cycle is advanced. */
+ immediately before and after cycle is advanced. */
void (* dfa_pre_advance_cycle) (void);
void (* dfa_post_advance_cycle) (void);
Index: tree-ssa-live.c
===================================================================
--- tree-ssa-live.c (revision 128014)
+++ tree-ssa-live.c (working copy)
@@ -477,7 +477,7 @@ remove_unused_scope_block_p (tree scope)
{
next = &TREE_CHAIN (*t);
- /* Debug info of nested function reffers to the block of the
+ /* Debug info of nested function refers to the block of the
function. */
if (TREE_CODE (*t) == FUNCTION_DECL)
unused = false;
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 128014)
+++ tree-vect-transform.c (working copy)
@@ -4619,7 +4619,7 @@ vect_setup_realignment (tree stmt, block
The problem arises only if the memory access is in an inner-loop nested
inside LOOP, which is now being vectorized using outer-loop vectorization.
This is the only case when the misalignment of the memory access may not
- remain fixed thtoughout the iterations of the inner-loop (as exaplained in
+ remain fixed throughout the iterations of the inner-loop (as explained in
detail in vect_supportable_dr_alignment). In this case, not only is the
optimized realignment scheme not applicable, but also the misalignment
computation (and generation of the realignment token that is passed to
@@ -6467,7 +6467,7 @@ vect_create_cond_for_align_checks (loop_
DR: The data reference.
VECT_FACTOR: vectorization factor.
- Return an exrpession whose value is the size of segment which will be
+ Return an expression whose value is the size of segment which will be
accessed by DR. */
static tree
Index: tree-vectorizer.c
===================================================================
--- tree-vectorizer.c (revision 128014)
+++ tree-vectorizer.c (working copy)
@@ -1701,7 +1701,7 @@ vect_supportable_dr_alignment (struct da
iterations, it is *not* guaranteed that is will remain the same throughout
the execution of the inner-loop. This is because the inner-loop advances
with the original scalar step (and not in steps of VS). If the inner-loop
- step happens to be a multiple of VS, then the misalignment remaines fixed
+ step happens to be a multiple of VS, then the misalignment remains fixed
and we can use the optimized realignment scheme. For example:
for (i=0; i<N; i++)
Index: tree.def
===================================================================
--- tree.def (revision 128014)
+++ tree.def (working copy)
@@ -1017,7 +1017,7 @@ DEFTREECODE (OMP_FOR, "omp_for", tcc_sta
which of the sections to execute. */
DEFTREECODE (OMP_SECTIONS, "omp_sections", tcc_statement, 3)
-/* This tree immediatelly follows OMP_SECTIONS, and represents the switch
+/* This tree immediately follows OMP_SECTIONS, and represents the switch
used to decide which branch is taken. */
DEFTREECODE (OMP_SECTIONS_SWITCH, "omp_sections_switch", tcc_statement, 0)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-07-07 13:38 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-07-07 13:38 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-07-07 Kazu Hirata <kazu@codesourcery.com>
* auto-inc-dec.c, config/arm/arm.c,
config/m32r/constraints.md, config/mips/mips.md,
config/rs6000/rs6000.c, cselib.c, dce.c, df-core.c,
df-problems.c, df-scan.c, df.h, dse.c, gimplify.c,
tree-if-conv.c, tree-ssa-sccvn.c, tree-ssa.c: Fix comment
typos. Follow spelling conventions.
* doc/invoke.texi, doc/rtl.texi: Fix typos.
Index: auto-inc-dec.c
===================================================================
--- auto-inc-dec.c (revision 126438)
+++ auto-inc-dec.c (working copy)
@@ -1276,7 +1276,7 @@ find_inc (bool first_try)
next add or inc, not the next insn that used the
reg. Because we are going to increment the reg
in this form, we need to make sure that there
- were no interveining uses of reg. */
+ were no intervening uses of reg. */
if (inc_insn.insn != other_insn)
return false;
}
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 126438)
+++ config/arm/arm.c (working copy)
@@ -5822,8 +5822,8 @@ vfp3_const_double_index (rtx x)
gcc_assert (mantissa >= 16 && mantissa <= 31);
/* The value of 5 here would be 4 if GCC used IEEE754-like encoding (where
- normalised significands are in the range [1, 2). (Our mantissa is shifted
- left 4 places at this point relative to normalised IEEE754 values). GCC
+ normalized significands are in the range [1, 2). (Our mantissa is shifted
+ left 4 places at this point relative to normalized IEEE754 values). GCC
internally uses [0.5, 1) (see real.c), so the exponent returned from
REAL_EXP must be altered. */
exponent = 5 - exponent;
Index: config/m32r/constraints.md
===================================================================
--- config/m32r/constraints.md (revision 126438)
+++ config/m32r/constraints.md (working copy)
@@ -104,13 +104,13 @@ (define_constraint "H"
;; Extra constraints
(define_constraint "Q"
- "A symbolic addresse loadable when ld24."
+ "A symbolic address loadable when ld24."
(ior (and (match_test "TARGET_ADDR24")
(match_test "GET_CODE (op) == LABEL_REF"))
(match_test "addr24_operand (op, VOIDmode)")))
(define_constraint "R"
- "A symbolic addresse loadable with ld24 can't be used."
+ "A symbolic address loadable with ld24 can't be used."
(ior (and (match_test "TARGET_ADDR32")
(match_test "GET_CODE (op) == LABEL_REF"))
(match_test "addr32_operand (op, VOIDmode)")))
Index: config/mips/mips.md
===================================================================
--- config/mips/mips.md (revision 126438)
+++ config/mips/mips.md (working copy)
@@ -256,7 +256,7 @@ (define_attr "jal_macro" "no,yes"
;; logical integer logical instructions
;; shift integer shift instructions
;; slt set less than instructions
-;; signext sign extend instuctions
+;; signext sign extend instructions
;; clz the clz and clo instructions
;; trap trap if instructions
;; imul integer multiply 2 operands
Index: config/rs6000/rs6000.c
===================================================================
--- config/rs6000/rs6000.c (revision 126438)
+++ config/rs6000/rs6000.c (working copy)
@@ -15629,7 +15629,7 @@ rs6000_emit_epilogue (int sibcall)
else
{
/* Make r11 point to the start of the SPE save area. We worried about
- not clobbering it when we were saving registers in the prolgoue.
+ not clobbering it when we were saving registers in the prologue.
There's no need to worry here because the static chain is passed
anew to every function. */
spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
Index: cselib.c
===================================================================
--- cselib.c (revision 126438)
+++ cselib.c (working copy)
@@ -948,7 +948,7 @@ cselib_expand_value_rtx (rtx orig, bitma
int regno = REGNO (orig);
/* The only thing that we are not willing to do (this
- is requirement of dse and if others potiential uses
+ is requirement of dse and if others potential uses
need this function we should add a parm to control
it) is that we will not substitute the
STACK_POINTER_REGNUM, FRAME_POINTER or the
Index: dce.c
===================================================================
--- dce.c (revision 126438)
+++ dce.c (working copy)
@@ -398,7 +398,7 @@ prescan_insns_for_dce (bool fast)
/* UD-based DSE routines. */
-/* Mark instructions that define artifically-used registers, such as
+/* Mark instructions that define artificially-used registers, such as
the frame pointer and the stack pointer. */
static void
Index: df-core.c
===================================================================
--- df-core.c (revision 126438)
+++ df-core.c (working copy)
@@ -207,7 +207,7 @@ There are four ways of doing the increme
insns when only a small number of them have really changed.
4) Do it yourself - In this mechanism, the pass updates the insns
- itself using the low level df primatives. Currently no pass does
+ itself using the low level df primitives. Currently no pass does
this, but it has the advantage that it is quite efficient given
that the pass generally has exact knowledge of what it is changing.
@@ -1502,7 +1502,7 @@ df_bb_delete (int bb_index)
/* Verify that there is a place for everything and everything is in
its place. This is too expensive to run after every pass in the
mainline. However this is an excellent debugging tool if the
- dataflow infomation is not being updated properly. You can just
+ dataflow information is not being updated properly. You can just
sprinkle calls in until you find the place that is changing an
underlying structure without calling the proper updating
routine. */
Index: df-problems.c
===================================================================
--- df-problems.c (revision 126438)
+++ df-problems.c (working copy)
@@ -713,7 +713,7 @@ static struct df_problem problem_RU =
df_ru_top_dump, /* Debugging start block. */
df_ru_bottom_dump, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
TV_DF_RU, /* Timing variable. */
true /* Reset blocks on dropping out of blocks_to_analyze. */
@@ -1211,7 +1211,7 @@ static struct df_problem problem_RD =
df_rd_top_dump, /* Debugging start block. */
df_rd_bottom_dump, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
TV_DF_RD, /* Timing variable. */
true /* Reset blocks on dropping out of blocks_to_analyze. */
@@ -3072,7 +3072,7 @@ static struct df_problem problem_UREC =
df_urec_top_dump, /* Debugging start block. */
df_urec_bottom_dump, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
&problem_LR, /* Dependent problem. */
TV_DF_UREC, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
@@ -3553,7 +3553,7 @@ static struct df_problem problem_CHAIN =
df_chain_top_dump, /* Debugging start block. */
df_chain_bottom_dump, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
&problem_RD, /* Dependent problem. */
TV_DF_CHAIN, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
@@ -4138,7 +4138,7 @@ static struct df_problem problem_NOTE =
NULL, /* Debugging start block. */
NULL, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
/* Technically this is only dependent on the live registers problem
but it will produce information if built one of uninitialized
Index: df-scan.c
===================================================================
--- df-scan.c (revision 126438)
+++ df-scan.c (working copy)
@@ -433,7 +433,7 @@ static struct df_problem problem_SCAN =
df_scan_start_block, /* Debugging start block. */
NULL, /* Debugging end block. */
NULL, /* Incremental solution verify start. */
- NULL, /* Incremental solution verfiy end. */
+ NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
TV_DF_SCAN, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
@@ -3432,7 +3432,7 @@ df_get_eh_block_artificial_uses (bitmap
{
bitmap_clear (eh_block_artificial_uses);
- /* The following code (down thru the arg_pointer seting APPEARS
+ /* The following code (down thru the arg_pointer setting APPEARS
to be necessary because there is nothing that actually
describes what the exception handling code may actually need
to keep alive. */
Index: df.h
===================================================================
--- df.h (revision 126438)
+++ df.h (working copy)
@@ -530,7 +530,7 @@ struct df
addresses. It is incremented whenever a ref is created. */
unsigned int ref_order;
- /* Problem specific control infomation. */
+ /* Problem specific control information. */
enum df_changeable_flags changeable_flags;
};
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 126438)
+++ doc/invoke.texi (working copy)
@@ -10390,7 +10390,7 @@ or @code{remainder} built-in functions:
This option will enable GCC to use RCPSS and RSQRTSS instructions (and their
vectorized variants RCPPS and RSQRTPS) instead of DIVSS and SQRTSS (and their
vectorized variants). These instructions will be generated only when
-@option{-funsafe-math-optimizatons} is enabled.
+@option{-funsafe-math-optimizations} is enabled.
@item -mpush-args
@itemx -mno-push-args
Index: doc/rtl.texi
===================================================================
--- doc/rtl.texi (revision 126438)
+++ doc/rtl.texi (working copy)
@@ -3370,7 +3370,7 @@ A list (chain of @code{insn_list} expres
dependencies between instructions within a basic block. Neither a jump
nor a label may come between the related insns. These are only used by
the schedulers and by combine. This is a deprecated data structure.
-Def-use and use-def chains are now prefered.
+Def-use and use-def chains are now preferred.
@findex REG_NOTES
@item REG_NOTES (@var{i})
Index: dse.c
===================================================================
--- dse.c (revision 126438)
+++ dse.c (working copy)
@@ -144,7 +144,7 @@ Software Foundation, 51 Franklin Street,
... <- A
flow would replace the right hand side of the second insn with a
- reference to r100. Most of the infomation is available to add this
+ reference to r100. Most of the information is available to add this
to this pass. It has not done it because it is a lot of work in
the case that either r100 is assigned to between the first and
second insn and/or the second insn is a load of part of the value
@@ -303,7 +303,7 @@ struct insn_info
insn. If the insn is deletable, it contains only one mem set.
But it could also contain clobbers. Insns that contain more than
one mem set are not deletable, but each of those mems are here in
- order to provied info to delete other insns. */
+ order to provide info to delete other insns. */
store_info_t store_rec;
/* The linked list of mem uses in this insn. Only the reads from
@@ -403,9 +403,9 @@ struct group_info
rtx canon_base_mem;
/* These two sets of two bitmaps are used to keep track of how many
- stores are actually referencing that postion from this base. We
+ stores are actually referencing that position from this base. We
only do this for rtx bases as this will be used to assign
- postions in the bitmaps for the global problem. Bit N is set in
+ positions in the bitmaps for the global problem. Bit N is set in
store1 on the first store for offset N. Bit N is set in store2
for the second store to offset N. This is all we need since we
only care about offsets that have two or more stores for them.
@@ -420,9 +420,9 @@ struct group_info
deleted. */
bitmap store1_n, store1_p, store2_n, store2_p;
- /* The postions in this bitmap have the same assignments as the in,
+ /* The positions in this bitmap have the same assignments as the in,
out, gen and kill bitmaps. This bitmap is all zeros except for
- the postions that are occupied by stores for this group. */
+ the positions that are occupied by stores for this group. */
bitmap group_kill;
/* True if there are any positions that are to be processed
@@ -434,7 +434,7 @@ struct group_info
bool frame_related;
/* The offset_map is used to map the offsets from this base into
- postions in the global bitmaps. It is only created after all of
+ positions in the global bitmaps. It is only created after all of
the all of stores have been scanned and we know which ones we
care about. */
int *offset_map_n, *offset_map_p;
@@ -1265,7 +1265,7 @@ record_store (rtx body, bb_info_t bb_inf
bool delete = true;
/* Skip the clobbers. We delete the active insn if this insn
- shaddows the set. To have been put on the active list, it
+ shadows the set. To have been put on the active list, it
has exactly one set. */
while (!s_info->is_set)
s_info = s_info->next;
@@ -2017,7 +2017,7 @@ dse_step1 (void)
Assign each byte position in the stores that we are going to
analyze globally to a position in the bitmaps. Returns true if
- there are any bit postions assigned.
+ there are any bit positions assigned.
----------------------------------------------------------------------------*/
static void
@@ -2837,7 +2837,7 @@ dse_step5_nospill (void)
}
}
/* We do want to process the local info if the insn was
- deleted. For insntance, if the insn did a wild read, we
+ deleted. For instance, if the insn did a wild read, we
no longer need to trash the info. */
if (insn_info->insn
&& INSN_P (insn_info->insn)
Index: gimplify.c
===================================================================
--- gimplify.c (revision 126438)
+++ gimplify.c (working copy)
@@ -2141,7 +2141,7 @@ gimplify_call_expr (tree *expr_p, tree *
i++, p = TREE_CHAIN (p))
{
/* We cannot distinguish a varargs function from the case
- of excess parameters, still defering the inlining decision
+ of excess parameters, still deferring the inlining decision
to the callee is possible. */
if (!p)
break;
Index: tree-if-conv.c
===================================================================
--- tree-if-conv.c (revision 126438)
+++ tree-if-conv.c (working copy)
@@ -667,7 +667,7 @@ add_to_dst_predicate_list (struct loop *
/* During if-conversion aux field from basic block structure is used to hold
predicate list. Clean each basic block's predicate list for the given LOOP.
- Also clean aux field of succesor edges, used to hold true and false
+ Also clean aux field of successor edges, used to hold true and false
condition from conditional expression. */
static void
Index: tree-ssa-sccvn.c
===================================================================
--- tree-ssa-sccvn.c (revision 126438)
+++ tree-ssa-sccvn.c (working copy)
@@ -1399,7 +1399,7 @@ simplify_binary_expression (tree rhs)
}
result = fold_binary (TREE_CODE (rhs), TREE_TYPE (rhs), op0, op1);
- /* Make sure result is not a complex expression consiting
+ /* Make sure result is not a complex expression consisting
of operators of operators (IE (a + b) + (a + c))
Otherwise, we will end up with unbounded expressions if
fold does anything at all. */
Index: tree-ssa.c
===================================================================
--- tree-ssa.c (revision 126438)
+++ tree-ssa.c (working copy)
@@ -1006,7 +1006,7 @@ useless_type_conversion_p (tree outer_ty
/* Otherwise pointers/references are equivalent if their pointed
to types are effectively the same. We can strip qualifiers
- on pointed-to types for further comparsion, which is done in
+ on pointed-to types for further comparison, which is done in
the callee. */
return useless_type_conversion_p (TREE_TYPE (outer_type),
TREE_TYPE (inner_type));
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-05-25 23:07 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-05-25 23:07 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-05-25 Kazu Hirata <kazu@codesourcery.com>
* cfglayout.c, cgraphunit.c, config/avr/avr.c, fold-const.c,
haifa-sched.c, optabs.h, tree-affine.c, tree-data-ref.c,
tree-predcom.c, tree-ssa-alias-warnings.c,
tree-ssa-forwprop.c, tree-vect-analyze.c, tree-vrp.c: Fix
comment typos. Follow spelling conventions.
* doc/cpp.texi, doc/invoke.texi: Fix typos.
Index: cfglayout.c
===================================================================
--- cfglayout.c (revision 125079)
+++ cfglayout.c (working copy)
@@ -237,7 +237,7 @@ int prologue_locator;
int epilogue_locator;
/* Hold current location information and last location information, so the
- datastructures are built lazilly only when some instructions in given
+ datastructures are built lazily only when some instructions in given
place are needed. */
location_t curr_location, last_location;
static tree curr_block, last_block;
Index: cgraphunit.c
===================================================================
--- cgraphunit.c (revision 125079)
+++ cgraphunit.c (working copy)
@@ -172,7 +172,7 @@ static GTY (()) tree static_ctors;
static GTY (()) tree static_dtors;
/* When target does not have ctors and dtors, we call all constructor
- and destructor by special initialization/destruction functio
+ and destructor by special initialization/destruction function
recognized by collect2.
When we are going to build this function, collect all constructors and
Index: config/avr/avr.c
===================================================================
--- config/avr/avr.c (revision 125079)
+++ config/avr/avr.c (working copy)
@@ -623,7 +623,7 @@ expand_prologue (void)
char buffer[40];
sprintf (buffer, "%s - %d", avr_init_stack, (int) size);
rtx sym = gen_rtx_SYMBOL_REF (HImode, ggc_strdup (buffer));
- /* Initialise stack pointer using frame pointer. */
+ /* Initialize stack pointer using frame pointer. */
insn = emit_move_insn (frame_pointer_rtx, sym);
RTX_FRAME_RELATED_P (insn) = 1;
insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
Index: doc/cpp.texi
===================================================================
--- doc/cpp.texi (revision 125079)
+++ doc/cpp.texi (working copy)
@@ -1919,7 +1919,7 @@ underscores.
@item __COUNTER__
This macro expands to sequential integral values starting from 0. In
-conjuction with the @code{##} operator, this provides a convenient means to
+conjunction with the @code{##} operator, this provides a convenient means to
generate unique identifiers. Care must be taken to ensure that
@code{__COUNTER__} is not expanded prior to inclusion of precompiled headers
which use it. Otherwise, the precompiled headers will not be used.
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 125079)
+++ doc/invoke.texi (working copy)
@@ -7727,8 +7727,8 @@ The @code{print-asm-header} function tak
prints a banner like:
@smallexample
-Assember options
-================
+Assembler options
+=================
Use "-Wa,OPTION" to pass "OPTION" to the assembler.
@end smallexample
Index: fold-const.c
===================================================================
--- fold-const.c (revision 125079)
+++ fold-const.c (working copy)
@@ -888,7 +888,7 @@ div_if_zero_remainder (enum tree_code co
int1l = TREE_INT_CST_LOW (arg1);
int1h = TREE_INT_CST_HIGH (arg1);
- /* &obj[0] + -128 really should be compiled as &obj[-8] rahter than
+ /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
&obj[some_exotic_number]. */
if (POINTER_TYPE_P (type))
{
Index: haifa-sched.c
===================================================================
--- haifa-sched.c (revision 125079)
+++ haifa-sched.c (working copy)
@@ -738,7 +738,7 @@ priority (rtx insn)
if (! INSN_P (insn))
return 0;
- /* We should not be insterested in priority of an already scheduled insn. */
+ /* We should not be interested in priority of an already scheduled insn. */
gcc_assert (QUEUE_INDEX (insn) != QUEUE_SCHEDULED);
if (!INSN_PRIORITY_KNOWN (insn))
Index: optabs.h
===================================================================
--- optabs.h (revision 125079)
+++ optabs.h (working copy)
@@ -89,13 +89,13 @@ enum optab_index
/* Signed multiply and add with the result and addend one machine mode
wider than the multiplicand and multiplier. */
OTI_smadd_widen,
- /* Unigned multiply and add with the result and addend one machine mode
+ /* Unsigned multiply and add with the result and addend one machine mode
wider than the multiplicand and multiplier. */
OTI_umadd_widen,
/* Signed multiply and subtract the result and minuend one machine mode
wider than the multiplicand and multiplier. */
OTI_smsub_widen,
- /* Unigned multiply and subtract the result and minuend one machine mode
+ /* Unsigned multiply and subtract the result and minuend one machine mode
wider than the multiplicand and multiplier. */
OTI_umsub_widen,
Index: tree-affine.c
===================================================================
--- tree-affine.c (revision 125079)
+++ tree-affine.c (working copy)
@@ -637,7 +637,7 @@ free_affine_expand_cache (struct pointer
/* If VAL != CST * DIV for any constant CST, returns false.
Otherwise, if VAL != 0 (and hence CST != 0), and *MULT_SET is true,
additionally compares CST and MULT, and if they are different,
- returns false. Finally, if neither of these two cases occcur,
+ returns false. Finally, if neither of these two cases occur,
true is returned, and if CST != 0, CST is stored to MULT and
MULT_SET is set to true. */
Index: tree-data-ref.c
===================================================================
--- tree-data-ref.c (revision 125079)
+++ tree-data-ref.c (working copy)
@@ -660,7 +660,7 @@ dr_analyze_innermost (struct data_refere
}
/* Determines the base object and the list of indices of memory reference
- DR, analysed in loop nest NEST. */
+ DR, analyzed in loop nest NEST. */
static void
dr_analyze_indices (struct data_reference *dr, struct loop *nest)
@@ -1225,7 +1225,7 @@ initialize_data_dependence_relation (str
/* If the base of the object is not invariant in the loop nest, we cannot
analyse it. TODO -- in fact, it would suffice to record that there may
- be arbitrary depencences in the loops where the base object varies. */
+ be arbitrary dependences in the loops where the base object varies. */
if (!object_address_invariant_in_loop_p (VEC_index (loop_p, loop_nest, 0),
DR_BASE_OBJECT (a)))
{
Index: tree-predcom.c
===================================================================
--- tree-predcom.c (revision 125079)
+++ tree-predcom.c (working copy)
@@ -793,7 +793,7 @@ end:
}
/* Returns true if the component COMP satisfies the conditions
- described in 2) at the begining of this file. LOOP is the current
+ described in 2) at the beginning of this file. LOOP is the current
loop. */
static bool
@@ -850,7 +850,7 @@ suitable_component_p (struct loop *loop,
/* Check the conditions on references inside each of components COMPS,
and remove the unsuitable components from the list. The new list
of components is returned. The conditions are described in 2) at
- the begining of this file. LOOP is the current loop. */
+ the beginning of this file. LOOP is the current loop. */
static struct component *
filter_suitable_components (struct loop *loop, struct component *comps)
@@ -1752,7 +1752,7 @@ execute_pred_commoning (struct loop *loo
update_ssa (TODO_update_ssa_only_virtuals);
}
-/* For each reference in CHAINS, if its definining statement is
+/* For each reference in CHAINS, if its defining statement is
ssa name, set it to phi node that defines it. */
static void
@@ -1771,7 +1771,7 @@ replace_phis_by_defined_names (VEC (chai
}
}
-/* For each reference in CHAINS, if its definining statement is
+/* For each reference in CHAINS, if its defining statement is
phi node, set it to the ssa name that is defined by it. */
static void
@@ -2018,7 +2018,7 @@ find_associative_operation_root (tree st
/* Returns the common statement in that NAME1 and NAME2 have a use. If there
is no such statement, returns NULL_TREE. In case the operation used on
- NAME1 and NAME2 is associative and comutative, returns the root of the
+ NAME1 and NAME2 is associative and commutative, returns the root of the
tree formed by this operation instead of the statement that uses NAME1 or
NAME2. */
Index: tree-ssa-alias-warnings.c
===================================================================
--- tree-ssa-alias-warnings.c (revision 125079)
+++ tree-ssa-alias-warnings.c (working copy)
@@ -154,7 +154,7 @@
associate different tags with MEM while building points-to information,
thus before we get to analyze it.
XXX: this could be solved by either running with -fno-strict-aliasing
- or by recording the points-to information before splitting the orignal
+ or by recording the points-to information before splitting the original
tag based on type.
Example 3.
@@ -316,7 +316,7 @@ struct match_info
tree object;
bool is_ptr;
/* The difference between the number of references to OBJECT
- and the number of occurences of &OBJECT. */
+ and the number of occurrences of &OBJECT. */
int found;
};
@@ -565,7 +565,7 @@ find_references_in_function (void)
/* Find the reference site for OBJECT.
- If IS_PTR is true, look for derferences of OBJECT instead.
+ If IS_PTR is true, look for dereferences of OBJECT instead.
XXX: only the first site is returned in the current
implementation. If there are no matching sites, return NULL_TREE. */
Index: tree-ssa-forwprop.c
===================================================================
--- tree-ssa-forwprop.c (revision 125079)
+++ tree-ssa-forwprop.c (working copy)
@@ -1165,7 +1165,7 @@ phiprop_insert_phi (basic_block bb, tree
}
if (TREE_CODE (old_arg) == SSA_NAME)
- /* Reuse a formely created dereference. */
+ /* Reuse a formerly created dereference. */
new_var = phivn[SSA_NAME_VERSION (old_arg)].value;
else
{
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 125079)
+++ tree-vect-analyze.c (working copy)
@@ -2210,7 +2210,7 @@ vect_stmt_relevant_p (tree stmt, loop_ve
- case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
skip DEF_STMT cause it had already been processed.
- Return true if everyting is as expected. Return false otherwise. */
+ Return true if everything is as expected. Return false otherwise. */
static bool
process_use (tree stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 125079)
+++ tree-vrp.c (working copy)
@@ -4627,7 +4627,7 @@ vrp_visit_assignment (tree stmt, tree *o
}
/* Helper that gets the value range of the SSA_NAME with version I
- or a symbolic range contaning the SSA_NAME only if the value range
+ or a symbolic range containing the SSA_NAME only if the value range
is varying or undefined. */
static inline value_range_t
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-04-15 15:01 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-04-15 15:01 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-04-15 Kazu Hirata <kazu@codesourcery.com>
* config/i386/i386.c, config/s390/s390.c, config/s390/s390.md,
tree-ssa-loop-niter.c, tree-ssa-structalias.c, tree-vrp.c: Fix
comment typos.
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 123842)
+++ config/i386/i386.c (working copy)
@@ -13566,8 +13566,8 @@ scale_counter (rtx countreg, int scale)
return sc;
}
-/* Return mode for the memcpy/memset loop counter. Preffer SImode over DImode
- for constant loop counts. */
+/* Return mode for the memcpy/memset loop counter. Prefer SImode over
+ DImode for constant loop counts. */
static enum machine_mode
counter_mode (rtx count_exp)
@@ -21141,7 +21141,7 @@ ix86_md_asm_clobbers (tree outputs ATTRI
return clobbers;
}
-/* Implementes target vector targetm.asm.encode_section_info. This
+/* Implements target vector targetm.asm.encode_section_info. This
is not used by netware. */
static void ATTRIBUTE_UNUSED
Index: config/s390/s390.c
===================================================================
--- config/s390/s390.c (revision 123842)
+++ config/s390/s390.c (working copy)
@@ -2656,7 +2656,7 @@ s390_secondary_reload (bool in_p, rtx x,
sri->icode = (TARGET_64BIT ?
CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
- /* Peforming a multiword move from or to memory we have to make sure the
+ /* Performing a multiword move from or to memory we have to make sure the
second chunk in memory is addressable without causing a displacement
overflow. If that would be the case we calculate the address in
a scratch register. */
@@ -2666,7 +2666,7 @@ s390_secondary_reload (bool in_p, rtx x,
&& !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
+ GET_MODE_SIZE (mode) - 1))
{
- /* For GENERAL_REGS a displacement overflow is no problem if occuring
+ /* For GENERAL_REGS a displacement overflow is no problem if occurring
in a s_operand address since we may fallback to lm/stm. So we only
have to care about overflows in the b+i+d case. */
if ((reg_classes_intersect_p (GENERAL_REGS, class)
Index: config/s390/s390.md
===================================================================
--- config/s390/s390.md (revision 123842)
+++ config/s390/s390.md (working copy)
@@ -308,7 +308,7 @@ (define_mode_attr Rf [(TF "f") (DF "R")
;; within instruction mnemonics.
(define_mode_attr bt [(TF "b") (DF "b") (SF "b") (TD "t") (DD "t") (SD "t")])
-;; Although it is unprecise for z9-ec we handle all dfp instructions like
+;; Although it is imprecise for z9-ec we handle all dfp instructions like
;; bfp regarding the pipeline description.
(define_mode_attr bfp [(TF "tf") (DF "df") (SF "sf")
(TD "tf") (DD "df") (SD "sf")])
Index: tree-ssa-loop-niter.c
===================================================================
--- tree-ssa-loop-niter.c (revision 123842)
+++ tree-ssa-loop-niter.c (working copy)
@@ -2474,7 +2474,7 @@ record_estimate (struct loop *loop, tree
delta = double_int_two;
i_bound = double_int_add (i_bound, delta);
- /* If an overflow occured, ignore the result. */
+ /* If an overflow occurred, ignore the result. */
if (double_int_ucmp (i_bound, delta) < 0)
return;
Index: tree-ssa-structalias.c
===================================================================
--- tree-ssa-structalias.c (revision 123842)
+++ tree-ssa-structalias.c (working copy)
@@ -3226,7 +3226,7 @@ update_alias_info (tree stmt, struct ali
combination of direct symbol references and pointer
dereferences (e.g., MEMORY_VAR = *PTR) or if a call site has
memory symbols in its argument list, but these cases do not
- occur so frequently as to constitue a serious problem. */
+ occur so frequently as to constitute a serious problem. */
if (STORED_SYMS (stmt))
EXECUTE_IF_SET_IN_BITMAP (STORED_SYMS (stmt), 0, i, bi)
{
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 123842)
+++ tree-vrp.c (working copy)
@@ -1815,7 +1815,7 @@ extract_range_from_binary_expr (value_ra
/* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
then drop to VR_VARYING. Outside of this range we get undefined
- behaviour from the shift operation. We cannot even trust
+ behavior from the shift operation. We cannot even trust
SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
shifts, and the operation at the tree level may be widened. */
if (code == RSHIFT_EXPR)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-03-17 18:11 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-03-17 18:11 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-03-17 Kazu Hirata <kazu@codesourcery.com>
* config/arm/arm.c, config/arm/thumb2.md, config/m68k/m68k.c,
config/spu/spu.c, omega.h, passes.c, predict.c: Fix comment
typos.
* doc/cpp.texi, doc/extend.texi, doc/invoke.texi: Fix typos.
Follow spelling conventions.
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 123024)
+++ config/arm/arm.c (working copy)
@@ -12247,7 +12247,7 @@ get_arm_condition_code (rtx comparison)
}
}
-/* Tell arm_asm_ouput_opcode to output IT blocks for conditionally executed
+/* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
instructions. */
void
thumb2_final_prescan_insn (rtx insn)
Index: config/arm/thumb2.md
===================================================================
--- config/arm/thumb2.md (revision 123024)
+++ config/arm/thumb2.md (working copy)
@@ -277,7 +277,7 @@ (define_insn "pic_load_dot_plus_four"
)
;; Thumb-2 always has load/store halfword instructions, so we can avoid a lot
-;; of the messyness associated with the ARM patterns.
+;; of the messiness associated with the ARM patterns.
(define_insn "*thumb2_movhi_insn"
[(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
(match_operand:HI 1 "general_operand" "rI,n,r,m"))]
Index: config/m68k/m68k.c
===================================================================
--- config/m68k/m68k.c (revision 123024)
+++ config/m68k/m68k.c (working copy)
@@ -3732,7 +3732,7 @@ print_operand_address (FILE *file, rtx a
int labelno;
/* If ADDR is a (d8,pc,Xn) address, this is the number of the
- label being acceesed, otherwise it is -1. */
+ label being accessed, otherwise it is -1. */
labelno = (address.offset
&& !address.base
&& GET_CODE (address.offset) == LABEL_REF
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 123024)
+++ config/spu/spu.c (working copy)
@@ -1377,7 +1377,7 @@ print_operand (FILE * file, rtx x, int c
output_addr_const (file, x);
return;
- /* unsed letters
+ /* unused letters
o qr uvw yz
AB OPQR UVWXYZ */
default:
Index: doc/cpp.texi
===================================================================
--- doc/cpp.texi (revision 123024)
+++ doc/cpp.texi (working copy)
@@ -2030,7 +2030,7 @@ neither @code{extern} nor @code{static}
standalone function.
If this macro is defined, GCC supports the @code{gnu_inline} function
-attribute as a way to always get the gnu89 behaviour. Support for
+attribute as a way to always get the gnu89 behavior. Support for
this and @code{__GNUC_GNU_INLINE__} was added in GCC 4.1.3. If
neither macro is defined, an older version of GCC is being used:
@code{inline} functions will be compiled in gnu89 mode, and the
Index: doc/extend.texi
===================================================================
--- doc/extend.texi (revision 123024)
+++ doc/extend.texi (working copy)
@@ -2262,7 +2262,7 @@ unlikely executed. The function is opti
many targets it is placed into special subsection of the text section so all
cold functions appears close together improving code locality of non-cold parts
of program. The paths leading to call of cold functions within code are marked
-as unlikely by the branch prediction mechanizm. It is thus useful to mark
+as unlikely by the branch prediction mechanism. It is thus useful to mark
functions used to handle unlikely conditions, such as @code{perror}, as cold to
improve optimization of hot functions that do call marked functions in rare
occasions.
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 123024)
+++ doc/invoke.texi (working copy)
@@ -1376,7 +1376,7 @@ C99 semantics for @code{inline} when in
specifies the default behavior). This option was first supported in
GCC 4.3. This option is not supported in C89 or gnu89 mode.
-The preprocesor macros @code{__GNUC_GNU_INLINE__} and
+The preprocessor macros @code{__GNUC_GNU_INLINE__} and
@code{__GNUC_STDC_INLINE__} may be used to check which semantics are
in effect for @code{inline} functions. @xref{Common Predefined
Macros,,,cpp,The C Preprocessor}.
Index: omega.h
===================================================================
--- omega.h (revision 123024)
+++ omega.h (working copy)
@@ -198,7 +198,7 @@ omega_copy_eqn (eqn e1, eqn e2, int s)
memcpy (e1->coef, e2->coef, (s + 1) * sizeof (int));
}
-/* Intialize E = 0. Equation E contains S variables. */
+/* Initialize E = 0. Equation E contains S variables. */
static inline void
omega_init_eqn_zero (eqn e, int s)
Index: passes.c
===================================================================
--- passes.c (revision 123024)
+++ passes.c (working copy)
@@ -460,7 +460,7 @@ init_optimization_passes (void)
/* Interprocedural optimization passes.
All these passes are ignored in -fno-unit-at-a-time
- except for subpases of early_local_pases. */
+ except for subpasses of early_local_passes. */
p = &all_ipa_passes;
NEXT_PASS (pass_ipa_function_and_variable_visibility);
NEXT_PASS (pass_ipa_early_inline);
Index: predict.c
===================================================================
--- predict.c (revision 123024)
+++ predict.c (working copy)
@@ -1311,10 +1311,10 @@ tree_estimate_probability (void)
care for error returns and other cases are often used for
fast paths through function.
- Since we've already removed the return statments, we are
+ Since we've already removed the return statements, we are
looking for CFG like:
- if (conditoinal)
+ if (conditional)
{
..
goto return_block
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-02-19 3:33 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-02-19 3:33 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-02-19 Kazu Hirata <kazu@codesourcery.com>
* config/m68k/linux-unwind.h: Fix a comment typo.
* target.h: Follow spelling conventions.
Index: config/m68k/linux-unwind.h
===================================================================
--- config/m68k/linux-unwind.h (revision 122117)
+++ config/m68k/linux-unwind.h (working copy)
@@ -34,7 +34,7 @@ Boston, MA 02110-1301, USA. */
#include <signal.h>
-/* <sys/ucontext.h> is unfortunaly broken right now */
+/* <sys/ucontext.h> is unfortunately broken right now. */
struct uw_ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
Index: target.h
===================================================================
--- target.h (revision 122117)
+++ target.h (working copy)
@@ -115,7 +115,7 @@ struct gcc_target
/* Output code that will globalize a label. */
void (* globalize_label) (FILE *, const char *);
- /* Output code that will globalise a declaration. */
+ /* Output code that will globalize a declaration. */
void (* globalize_decl_name) (FILE *, tree);
/* Output code that will emit a label for unwind info, if this
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-02-18 1:34 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-02-18 1:34 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-02-18 Kazu Hirata <kazu@codesourcery.com>
* cfgloop.c, config/alpha/alpha.c, config/bfin/bfin.c,
config/i386/athlon.md, config/ia64/ia64.md,
config/rs6000/rs6000.c, config/s390/s390.c, config/spu/spu.md,
df-problems.c, df.h, fold-const.c, ipa-cp.c, ipa-inline.c,
ipa-prop.h, see.c, struct-equiv.c, tree-inline.c,
tree-ssa-loop-niter.c, tree-vect-analyze.c,
tree-vect-transform.c: Fix comment typos.
Index: cfgloop.c
===================================================================
--- cfgloop.c (revision 122074)
+++ cfgloop.c (working copy)
@@ -701,7 +701,7 @@ disambiguate_multiple_latches (struct lo
{
edge e;
- /* We eliminate the mutiple latches by splitting the header to the forwarder
+ /* We eliminate the multiple latches by splitting the header to the forwarder
block F and the rest R, and redirecting the edges. There are two cases:
1) If there is a latch edge E that corresponds to a subloop (we guess
Index: config/alpha/alpha.c
===================================================================
--- config/alpha/alpha.c (revision 122074)
+++ config/alpha/alpha.c (working copy)
@@ -4434,7 +4434,7 @@ emit_insxl (enum machine_mode mode, rtx
return ret;
}
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
to perform. MEM is the memory on which to operate. VAL is the second
operand of the binary operator. BEFORE and AFTER are optional locations to
return the value of MEM either before of after the operation. SCRATCH is
Index: config/bfin/bfin.c
===================================================================
--- config/bfin/bfin.c (revision 122074)
+++ config/bfin/bfin.c (working copy)
@@ -1537,7 +1537,7 @@ function_arg (CUMULATIVE_ARGS *cum, enum
For args passed entirely in registers or entirely in memory, zero.
Refer VDSP C Compiler manual, our ABI.
- First 3 words are in registers. So, if a an argument is larger
+ First 3 words are in registers. So, if an argument is larger
than the registers available, it will span the register and
stack. */
Index: config/i386/athlon.md
===================================================================
--- config/i386/athlon.md (revision 122074)
+++ config/i386/athlon.md (working copy)
@@ -603,7 +603,7 @@ (define_insn_reservation "athlon_mmxssel
"athlon-direct,athlon-fploadk8,athlon-fstore")
;; On AMDFAM10 all double, single and integer packed and scalar SSEx data
;; loads generated are direct path, latency of 2 and do not use any FP
-;; executions units. No seperate entries for movlpx/movhpx loads, which
+;; executions units. No separate entries for movlpx/movhpx loads, which
;; are direct path, latency of 4 and use the FADD/FMUL FP execution units,
;; as they will not be generated.
(define_insn_reservation "athlon_sseld_amdfam10" 2
@@ -637,7 +637,7 @@ (define_insn_reservation "athlon_mmxsses
"athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)")
;; On AMDFAM10 all double, single and integer packed SSEx data stores
;; generated are all double path, latency of 2 and use the FSTORE FP
-;; execution unit. No entries seperate for movupx/movdqu, which are
+;; execution unit. No entries separate for movupx/movdqu, which are
;; vector path, latency of 3 and use the FSTORE*2 FP execution unit,
;; as they will not be generated.
(define_insn_reservation "athlon_ssest_amdfam10" 2
Index: config/ia64/ia64.md
===================================================================
--- config/ia64/ia64.md (revision 122074)
+++ config/ia64/ia64.md (working copy)
@@ -476,7 +476,7 @@ (define_mode_attr mem_constr [(BI "*m")
;; Define register predicate prefix.
;; We can generate speculative loads only for general and fp registers - this
-;; is constrainted in ia64.c: ia64_speculate_insn ().
+;; is constrained in ia64.c: ia64_speculate_insn ().
(define_mode_attr reg_pred_prefix [(BI "gr") (QI "gr") (HI "gr") (SI "gr") (DI "grfr") (SF "grfr") (DF "grfr") (XF "fr") (TI "fr")])
(define_mode_attr ld_class [(BI "ld") (QI "ld") (HI "ld") (SI "ld") (DI "ld,fld") (SF "fld,ld") (DF "fld,ld") (XF "fld") (TI "fldp")])
Index: config/rs6000/rs6000.c
===================================================================
--- config/rs6000/rs6000.c (revision 122074)
+++ config/rs6000/rs6000.c (working copy)
@@ -12705,7 +12705,7 @@ emit_store_conditional (enum machine_mod
emit_insn (fn (res, mem, val));
}
-/* Expand an an atomic fetch-and-operate pattern. CODE is the binary operation
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
to perform. MEM is the memory on which to operate. VAL is the second
operand of the binary operator. BEFORE and AFTER are optional locations to
return the value of MEM either before of after the operation. SCRATCH is
Index: config/s390/s390.c
===================================================================
--- config/s390/s390.c (revision 122074)
+++ config/s390/s390.c (working copy)
@@ -4228,7 +4228,7 @@ s390_expand_cs_hqi (enum machine_mode mo
}
/* Expand an atomic operation CODE of mode MODE. MEM is the memory location
- and VAL the value to play with. If AFTER is true then store the the value
+ and VAL the value to play with. If AFTER is true then store the value
MEM holds after the operation, if AFTER is false then store the value MEM
holds before the operation. If TARGET is zero then discard that value, else
store it to TARGET. */
Index: config/spu/spu.md
===================================================================
--- config/spu/spu.md (revision 122074)
+++ config/spu/spu.md (working copy)
@@ -56,7 +56,7 @@ (define_insn_reservation "FP7" 7 (eq_att
"pipe0, fp, nothing*5")
;; The behavior of the double precision is that both pipes stall
-;; for 6 cycles and the the rest of the operation pipelines for
+;; for 6 cycles and the rest of the operation pipelines for
;; 7 cycles. The simplest way to model this is to simply ignore
;; the 6 cyle stall.
(define_insn_reservation "FPD" 7 (eq_attr "type" "fpd")
Index: df-problems.c
===================================================================
--- df-problems.c (revision 122074)
+++ df-problems.c (working copy)
@@ -1968,7 +1968,7 @@ df_ur_init (struct dataflow *dflow, bitm
}
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
ur_in sets of all of the blocks. */
static void
@@ -2550,7 +2550,7 @@ df_urec_init (struct dataflow *dflow, bi
}
-/* Or in the stack regs, hard regs and early clobber regs into the the
+/* Or in the stack regs, hard regs and early clobber regs into the
ur_in sets of all of the blocks. */
static void
Index: df.h
===================================================================
--- df.h (revision 122074)
+++ df.h (working copy)
@@ -562,7 +562,7 @@ struct df_urec_bb_info
{
/* Local sets to describe the basic blocks. */
bitmap earlyclobber; /* The set of registers that are referenced
- with an an early clobber mode. */
+ with an early clobber mode. */
/* Kill and gen are defined as in the UR problem. */
bitmap kill;
bitmap gen;
Index: fold-const.c
===================================================================
--- fold-const.c (revision 122074)
+++ fold-const.c (working copy)
@@ -8860,8 +8860,8 @@ fold_comparison (enum tree_code code, tr
}
/* If this is a comparison of complex values and both sides
- are COMPLEX_CST, do the comparision by parts to fold the
- comparision. */
+ are COMPLEX_CST, do the comparison by parts to fold the
+ comparison. */
if ((code == EQ_EXPR || code == NE_EXPR)
&& TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
&& TREE_CODE (arg0) == COMPLEX_CST
Index: ipa-cp.c
===================================================================
--- ipa-cp.c (revision 122074)
+++ ipa-cp.c (working copy)
@@ -65,7 +65,7 @@ Software Foundation, 51 Franklin Street,
arguments
of the callsite. There are three types of values :
Formal - the caller's formal parameter is passed as an actual argument.
- Constant - a constant is passed as a an actual argument.
+ Constant - a constant is passed as an actual argument.
Unknown - neither of the above.
In order to compute the jump functions, we need the modify information for
Index: ipa-inline.c
===================================================================
--- ipa-inline.c (revision 122074)
+++ ipa-inline.c (working copy)
@@ -1345,7 +1345,7 @@ cgraph_decide_inlining_incrementally (st
continue;
}
/* When the function body would grow and inlining the function won't
- elliminate the need for offline copy of the function, don't inline.
+ eliminate the need for offline copy of the function, don't inline.
*/
if (mode == INLINE_SIZE
&& (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
Index: ipa-prop.h
===================================================================
--- ipa-prop.h (revision 122074)
+++ ipa-prop.h (working copy)
@@ -29,7 +29,7 @@ Software Foundation, 51 Franklin Street,
/* A jump function for a callsite represents the values passed as actual
arguments of the callsite. There are three main types of values :
Formal - the caller's formal parameter is passed as an actual argument.
- Constant - a constant is passed as a an actual argument.
+ Constant - a constant is passed as an actual argument.
Unknown - neither of the above.
Integer and real constants are represented as CONST_IPATYPE and Fortran
constants are represented as CONST_IPATYPE_REF. */
Index: see.c
===================================================================
--- see.c (revision 122074)
+++ see.c (working copy)
@@ -3170,7 +3170,7 @@ see_store_reference_and_extension (rtx r
A definition is relevant if its root has
((entry_type == SIGN_EXTENDED_DEF) || (entry_type == ZERO_EXTENDED_DEF)) and
- his source_mode is not narrower then the the roots source_mode.
+ his source_mode is not narrower then the roots source_mode.
Return the number of relevant defs or negative number if something bad had
happened and the optimization should be aborted. */
Index: struct-equiv.c
===================================================================
--- struct-equiv.c (revision 122074)
+++ struct-equiv.c (working copy)
@@ -344,7 +344,7 @@ note_local_live (struct equiv_info *info
return x_change;
}
-/* Check if *XP is equivalent to Y. Until an an unreconcilable difference is
+/* Check if *XP is equivalent to Y. Until an unreconcilable difference is
found, use in-group changes with validate_change on *XP to make register
assignments agree. It is the (not necessarily direct) callers
responsibility to verify / confirm / cancel these changes, as appropriate.
@@ -570,7 +570,7 @@ rtx_equiv_p (rtx *xp, rtx y, int rvalue,
return false;
x_dest1 = XEXP (x, 0);
/* validate_change might have changed the destination. Put it back
- so that we can do a proper match for its role a an input. */
+ so that we can do a proper match for its role as an input. */
XEXP (x, 0) = x_dest0;
if (!rtx_equiv_p (&XEXP (x, 0), XEXP (y, 0), 1, info))
return false;
Index: tree-inline.c
===================================================================
--- tree-inline.c (revision 122074)
+++ tree-inline.c (working copy)
@@ -2288,7 +2288,7 @@ init_inline_once (void)
/* Estimating time for call is difficult, since we have no idea what the
called function does. In the current uses of eni_time_weights,
underestimating the cost does less harm than overestimating it, so
- we choose a rather small walue here. */
+ we choose a rather small value here. */
eni_time_weights.call_cost = 10;
eni_time_weights.div_mod_cost = 10;
eni_time_weights.switch_cost = 4;
Index: tree-ssa-loop-niter.c
===================================================================
--- tree-ssa-loop-niter.c (revision 122074)
+++ tree-ssa-loop-niter.c (working copy)
@@ -1060,7 +1060,7 @@ number_of_iterations_exit (struct loop *
return false;
/* We don't want to see undefined signed overflow warnings while
- computing the nmber of iterations. */
+ computing the number of iterations. */
fold_defer_overflow_warnings ();
iv0.base = expand_simple_operations (iv0.base);
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 122074)
+++ tree-vect-analyze.c (working copy)
@@ -1438,7 +1438,7 @@ vect_enhance_data_refs_alignment (loop_v
can make all data references satisfy vect_supportable_dr_alignment.
If so, update data structures as needed and return true. Note that
at this time vect_supportable_dr_alignment is known to return false
- for a a misaligned write.
+ for a misaligned write.
B) If peeling wasn't possible and there is a data reference with an
unknown misalignment that does not satisfy vect_supportable_dr_alignment
@@ -1812,7 +1812,7 @@ vect_analyze_data_ref_access (struct dat
{
/* Skip same data-refs. In case that two or more stmts share data-ref
(supported only for loads), we vectorize only the first stmt, and
- the rest get their vectorized loads from the the first one. */
+ the rest get their vectorized loads from the first one. */
if (!tree_int_cst_compare (DR_INIT (data_ref),
DR_INIT (STMT_VINFO_DATA_REF (
vinfo_for_stmt (next)))))
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 122074)
+++ tree-vect-transform.c (working copy)
@@ -1073,7 +1073,7 @@ get_initial_def_for_reduction (tree stmt
REDUCTION_PHI is the phi-node that carries the reduction computation.
This function:
- 1. Creates the reduction def-use cycle: sets the the arguments for
+ 1. Creates the reduction def-use cycle: sets the arguments for
REDUCTION_PHI:
The loop-entry argument is the vectorized initial-value of the reduction.
The loop-latch argument is VECT_DEF - the vector of partial sums.
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-02-03 16:48 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2007-02-03 16:48 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-02-03 Kazu Hirata <kazu@codesourcery.com>
* c-decl.c, config/avr/avr.c, config/avr/avr.h,
config/m68k/m68k.c, config/m68k/netbsd-elf.h,
config/mn10300/mn10300.c, config/pdp11/pdp11.h,
config/rs6000/cell.md, config/rs6000/darwin.h,
config/sh/sh.md, config/sh/sh4-300.md, config/spu/spu.c,
config/spu/spu.md, cselib.c, expr.c, haifa-sched.c, hwint.h,
jump.c, reload.c, sched-deps.c, sched-int.h, tree-inline.c,
tree-profile.c, tree-ssa-live.h, tree-vrp.c: Fix comment
typos. Follow spelling conventions.
* doc/invoke.texi: Follow spelling conventions.
Index: c-decl.c
===================================================================
--- c-decl.c (revision 121541)
+++ c-decl.c (working copy)
@@ -64,7 +64,7 @@ Software Foundation, 51 Franklin Street,
/* Set this to 1 if you want the standard ISO C99 semantics of 'inline'
when you specify -std=c99 or -std=gnu99, and to 0 if you want
- behaviour compatible with the nonstandard semantics implemented by
+ behavior compatible with the nonstandard semantics implemented by
GCC 2.95 through 4.2. */
#define WANT_C99_INLINE_SEMANTICS 1
Index: config/avr/avr.c
===================================================================
--- config/avr/avr.c (revision 121541)
+++ config/avr/avr.c (working copy)
@@ -5527,7 +5527,7 @@ avr_ret_register (void)
return 24;
}
-/* Ceate an RTX representing the place where a
+/* Create an RTX representing the place where a
library function returns a value of mode MODE. */
rtx
Index: config/avr/avr.h
===================================================================
--- config/avr/avr.h (revision 121541)
+++ config/avr/avr.h (working copy)
@@ -725,7 +725,7 @@ extern int avr_case_values_threshold;
#define CC1PLUS_SPEC "%{!frtti:-fno-rtti} \
%{!fenforce-eh-specs:-fno-enforce-eh-specs} \
%{!fexceptions:-fno-exceptions}"
-/* A C string constant that tells the GCC drvier program options to
+/* A C string constant that tells the GCC driver program options to
pass to `cc1plus'. */
#define ASM_SPEC "%{mmcu=avr25:-mmcu=avr2;\
Index: config/m68k/m68k.c
===================================================================
--- config/m68k/m68k.c (revision 121541)
+++ config/m68k/m68k.c (working copy)
@@ -462,7 +462,7 @@ override_options (void)
implementing architecture ARCH. -mcpu=CPU should override -march
and should generate code that runs on processor CPU, making free
use of any instructions that CPU understands. -mtune=UARCH applies
- on top of -mcpu or -march and optimises the code for UARCH. It does
+ on top of -mcpu or -march and optimizes the code for UARCH. It does
not change the target architecture. */
if (m68k_cpu_entry)
{
Index: config/m68k/netbsd-elf.h
===================================================================
--- config/m68k/netbsd-elf.h (revision 121541)
+++ config/m68k/netbsd-elf.h (working copy)
@@ -69,7 +69,7 @@ Boston, MA 02110-1301, USA. */
/* Provide an ASM_SPEC appropriate for NetBSD m68k ELF targets. We need
- to passn PIC code generation options. */
+ to pass PIC code generation options. */
#undef ASM_SPEC
#define ASM_SPEC "%(asm_cpu_spec) %{fpic|fpie:-k} %{fPIC|fPIE:-k -K}"
Index: config/mn10300/mn10300.c
===================================================================
--- config/mn10300/mn10300.c (revision 121541)
+++ config/mn10300/mn10300.c (working copy)
@@ -546,7 +546,7 @@ fp_regs_to_save (void)
/* Print a set of registers in the format required by "movm" and "ret".
Register K is saved if bit K of MASK is set. The data and address
registers can be stored individually, but the extended registers cannot.
- We assume that the mask alread takes that into account. For instance,
+ We assume that the mask already takes that into account. For instance,
bits 14 to 17 must have the same value. */
void
Index: config/pdp11/pdp11.h
===================================================================
--- config/pdp11/pdp11.h (revision 121541)
+++ config/pdp11/pdp11.h (working copy)
@@ -441,7 +441,7 @@ extern int current_first_parm_offset;
For the pdp11, this is nonzero to account for the return address.
1 - return address
2 - frame pointer (always saved, even when not used!!!!)
- -- chnage some day !!!:q!
+ -- change some day !!!:q!
*/
#define FIRST_PARM_OFFSET(FNDECL) 4
Index: config/rs6000/cell.md
===================================================================
--- config/rs6000/cell.md (revision 121541)
+++ config/rs6000/cell.md (working copy)
@@ -25,7 +25,7 @@
;; This file simulate PPU processor unit backend of pipeline, maualP24.
;; manual P27, stall and flush points
;; IU, XU, VSU, dispatcher decodes and dispatch 2 insns per cycle in program
-;; order, the grouped adress are aligned by 8
+;; order, the grouped address are aligned by 8
;; This file only simulate one thread situation
;; XU executes all fixed point insns(3 units, a simple alu, a complex unit,
;; and load/store unit)
Index: config/rs6000/darwin.h
===================================================================
--- config/rs6000/darwin.h (revision 121541)
+++ config/rs6000/darwin.h (working copy)
@@ -401,7 +401,7 @@ do { \
macro in the Apple version of GCC, except that version supports
'mac68k' alignment, and that version uses the computed alignment
always for the first field of a structure. The first-field
- behaviour is dealt with by
+ behavior is dealt with by
darwin_rs6000_special_round_type_align. */
#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
(TARGET_ALIGN_NATURAL ? (COMPUTED) \
Index: config/sh/sh.md
===================================================================
--- config/sh/sh.md (revision 121541)
+++ config/sh/sh.md (working copy)
@@ -4771,7 +4771,7 @@ (define_insn "truncdihi2"
; N.B. This should agree with LOAD_EXTEND_OP and movqi.
; Because we use zero extension, we can't provide signed QImode compares
-; using a simple compare or conditional banch insn.
+; using a simple compare or conditional branch insn.
(define_insn "truncdiqi2"
[(set (match_operand:QI 0 "general_movdst_operand" "=r,m")
(truncate:QI (match_operand:DI 1 "register_operand" "r,r")))]
Index: config/sh/sh4-300.md
===================================================================
--- config/sh/sh4-300.md (revision 121541)
+++ config/sh/sh4-300.md (working copy)
@@ -187,7 +187,7 @@ (define_insn_reservation "sh4_300_ocbwb"
;; Scheduling runs before reorg, so we approximate this by saying that we
;; want the call to be paired with a preceding insn.
;; In most cases, the insn that loads the address of the call should have
-;; a non-zero latency (mov rn,rm doesn't make sense since we could use rn
+;; a nonzero latency (mov rn,rm doesn't make sense since we could use rn
;; for the address then). Thus, a preceding insn that can be paired with
;; a call should be eligible for the delay slot.
;;
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 121541)
+++ config/spu/spu.c (working copy)
@@ -2792,7 +2792,7 @@ spu_handle_vector_attribute (tree * node
return NULL_TREE;
}
-/* Return non-zero if FUNC is a naked function. */
+/* Return nonzero if FUNC is a naked function. */
static int
spu_naked_function_p (tree func)
{
Index: config/spu/spu.md
===================================================================
--- config/spu/spu.md (revision 121541)
+++ config/spu/spu.md (working copy)
@@ -55,7 +55,7 @@ (define_insn_reservation "FP6" 6 (eq_att
(define_insn_reservation "FP7" 7 (eq_attr "type" "fp7")
"pipe0, fp, nothing*5")
-;; The behaviour of the double precision is that both pipes stall
+;; The behavior of the double precision is that both pipes stall
;; for 6 cycles and the the rest of the operation pipelines for
;; 7 cycles. The simplest way to model this is to simply ignore
;; the 6 cyle stall.
Index: cselib.c
===================================================================
--- cselib.c (revision 121541)
+++ cselib.c (working copy)
@@ -1453,7 +1453,7 @@ cselib_process_insn (rtx insn)
if (n_useless_values > MAX_USELESS_VALUES
/* remove_useless_values is linear in the hash table size. Avoid
- quadratic behaviour for very large hashtables with very few
+ quadratic behavior for very large hashtables with very few
useless elements. */
&& (unsigned int)n_useless_values > cselib_hash_table->n_elements / 4)
remove_useless_values ();
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 121541)
+++ doc/invoke.texi (working copy)
@@ -1658,7 +1658,7 @@ when used within the DSO@. Enabling thi
on load and link times of a DSO as it massively reduces the size of the
dynamic export table when the library makes heavy use of templates.
-The behaviour of this switch is not quite the same as marking the
+The behavior of this switch is not quite the same as marking the
methods as hidden directly, because it does not affect static variables
local to the function or cause the compiler to deduce that
the function is defined in only one shared object.
Index: expr.c
===================================================================
--- expr.c (revision 121541)
+++ expr.c (working copy)
@@ -8988,7 +8988,7 @@ string_constant (tree arg, tree *ptr_off
&& TREE_CODE (array) != VAR_DECL)
return 0;
- /* Check if the array has a non-zero lower bound. */
+ /* Check if the array has a nonzero lower bound. */
lower_bound = array_ref_low_bound (TREE_OPERAND (arg, 0));
if (!integer_zerop (lower_bound))
{
Index: haifa-sched.c
===================================================================
--- haifa-sched.c (revision 121541)
+++ haifa-sched.c (working copy)
@@ -684,7 +684,7 @@ dep_cost (dep_t link)
rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
/* Make it self-cycled, so that if some tries to walk over this
- incomplete list he/she will be cought in an endless loop. */
+ incomplete list he/she will be caught in an endless loop. */
XEXP (dep_cost_rtx_link, 1) = dep_cost_rtx_link;
/* Targets use only REG_NOTE_KIND of the link. */
Index: hwint.h
===================================================================
--- hwint.h (revision 121541)
+++ hwint.h (working copy)
@@ -128,7 +128,7 @@ extern char sizeof_long_long_must_be_8[s
efficiently in hardware. (That is, the widest integer type that fits
in a hardware register.) Normally this is "long" but on some hosts it
should be "long long" or "__int64". This is no convenient way to
- autodect this, so such systems must set a flag in config.host; see there
+ autodetect this, so such systems must set a flag in config.host; see there
for details. */
#ifdef USE_LONG_LONG_FOR_WIDEST_FAST_INT
Index: jump.c
===================================================================
--- jump.c (revision 121541)
+++ jump.c (working copy)
@@ -1639,7 +1639,7 @@ redirect_jump_2 (rtx jump, rtx olabel, r
{
rtx note;
- /* negative DELETE_UNUSED used to be used to signalize behaviour on
+ /* Negative DELETE_UNUSED used to be used to signalize behavior on
moving FUNCTION_END note. Just sanity check that no user still worry
about this. */
gcc_assert (delete_unused >= 0);
Index: reload.c
===================================================================
--- reload.c (revision 121541)
+++ reload.c (working copy)
@@ -1717,7 +1717,7 @@ combine_reloads (void)
return;
/* If there is a reload for part of the address of this operand, we would
- need to chnage it to RELOAD_FOR_OTHER_ADDRESS. But that would extend
+ need to change it to RELOAD_FOR_OTHER_ADDRESS. But that would extend
its life to the point where doing this combine would not lower the
number of spill registers needed. */
for (i = 0; i < n_reloads; i++)
Index: sched-deps.c
===================================================================
--- sched-deps.c (revision 121541)
+++ sched-deps.c (working copy)
@@ -97,7 +97,7 @@ init_dep_1 (dep_t dep, rtx pro, rtx con,
/* Init DEP with the arguments.
While most of the scheduler (including targets) only need the major type
- of the dependency, it is convinient to hide full dep_status from them. */
+ of the dependency, it is convenient to hide full dep_status from them. */
void
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
{
@@ -2425,7 +2425,7 @@ add_back_forw_dep (rtx insn, rtx elem, e
gcc_assert (deps_list_consistent_p (INSN_BACK_DEPS (insn)));
}
-/* Remove a dependency refered by L. */
+/* Remove a dependency referred to by L. */
void
delete_back_forw_dep (dep_link_t l)
{
Index: sched-int.h
===================================================================
--- sched-int.h (revision 121541)
+++ sched-int.h (working copy)
@@ -142,8 +142,8 @@ void copy_deps_list_change_con (deps_lis
void move_dep_link (dep_link_t, deps_list_t);
-/* Suppose we have a depedence Y between insn pro1 and con1, where pro1 has
- additional dependants con0 and con2, and con1 is dependant on additional
+/* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
+ additional dependents con0 and con2, and con1 is dependent on additional
insns pro0 and pro1:
.con0 pro0
Index: tree-inline.c
===================================================================
--- tree-inline.c (revision 121541)
+++ tree-inline.c (working copy)
@@ -2223,7 +2223,7 @@ estimate_num_insns_1 (tree *tp, int *wal
}
/* Estimate number of instructions that will be created by expanding EXPR.
- WEIGHTS contains weigths attributed to various constructs. */
+ WEIGHTS contains weights attributed to various constructs. */
int
estimate_num_insns (tree expr, eni_weights *weights)
Index: tree-profile.c
===================================================================
--- tree-profile.c (revision 121541)
+++ tree-profile.c (working copy)
@@ -64,7 +64,7 @@ static GTY(()) tree ptr_void;
/* Add code:
static gcov* __gcov_indirect_call_counters; // pointer to actual counter
- static void* __gcov_indirect_call_callee; // actual callee addres
+ static void* __gcov_indirect_call_callee; // actual callee address
*/
static void
tree_init_ic_make_global_vars (void)
Index: tree-ssa-live.h
===================================================================
--- tree-ssa-live.h (revision 121541)
+++ tree-ssa-live.h (working copy)
@@ -46,7 +46,7 @@ Boston, MA 02110-1301, USA. */
change. (ie, it is truly a view since it doesn't change anything)
The final component of the data structure is the basevar map. This provides
- a list of all the different base variables which occue in a partition view,
+ a list of all the different base variables which occur in a partition view,
and a unique index for each one. Routines are provided to quickly produce
the base variable of a partition.
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 121541)
+++ tree-vrp.c (working copy)
@@ -3481,7 +3481,7 @@ insert_range_assertions (void)
/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
and "struct" hacks. If VRP can determine that the
- array subscript is a contant, check if it is outside valid
+ array subscript is a constant, check if it is outside valid
range. If the array subscript is a RANGE, warn if it is
non-overlapping with valid range.
IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [patch] gcc/*: Fix comment typos.
2007-01-31 4:00 Kazu Hirata
@ 2007-01-31 5:06 ` Brooks Moses
0 siblings, 0 replies; 38+ messages in thread
From: Brooks Moses @ 2007-01-31 5:06 UTC (permalink / raw)
To: gcc-patches
Kazu Hirata wrote:
> Index: config/arm/arm.c
> ===================================================================
> --- config/arm/arm.c (revision 121370)
> +++ config/arm/arm.c (working copy)
> @@ -7710,7 +7710,7 @@ get_jump_table_size (rtx insn)
> switch (modesize)
> {
> case 1:
> - /* Round up size of TBB table to a haflword boundary. */
> + /* Round up size of TBB table to a halfword boundary. */
> size = (size + 1) & ~(HOST_WIDE_INT)1;
> break;
> case 2:
Looks like an extra space snuck into that one. :)
Meanwhile, because I've been meaning to ask: I presume these patches are
coming from some sort of automated spellcheck. What programs do you use
to do this, in order to deal with only checking the comments and to deal
with Texinfo syntax in the .texi files?
- Brooks
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2007-01-31 4:00 Kazu Hirata
2007-01-31 5:06 ` Brooks Moses
0 siblings, 1 reply; 38+ messages in thread
From: Kazu Hirata @ 2007-01-31 4:00 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2007-01-31 Kazu Hirata <kazu@codesourcery.com>
* cgraphunit.c, config/arm/arm.c, config/m68k/m68k.c,
ipa-inline.c, tree-profile.c, tree-ssa-live.c,
tree-ssa-math-opts.c, tree-ssanames.c, tree-vect-analyze.c,
value-prof.c: Fix comment typos.
Index: cgraphunit.c
===================================================================
--- cgraphunit.c (revision 121370)
+++ cgraphunit.c (working copy)
@@ -783,7 +783,7 @@ process_function_and_variable_attributes
/* Process CGRAPH_NODES_NEEDED queue, analyze each function (and transitively
each reachable functions) and build cgraph.
The function can be called multiple times after inserting new nodes
- into beggining of queue. Just the new part of queue is re-scanned then. */
+ into beginning of queue. Just the new part of queue is re-scanned then. */
static void
cgraph_analyze_functions (void)
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 121370)
+++ config/arm/arm.c (working copy)
@@ -7710,7 +7710,7 @@ get_jump_table_size (rtx insn)
switch (modesize)
{
case 1:
- /* Round up size of TBB table to a haflword boundary. */
+ /* Round up size of TBB table to a halfword boundary. */
size = (size + 1) & ~(HOST_WIDE_INT)1;
break;
case 2:
Index: config/m68k/m68k.c
===================================================================
--- config/m68k/m68k.c (revision 121370)
+++ config/m68k/m68k.c (working copy)
@@ -1957,7 +1957,7 @@ m68k_rtx_costs (rtx x, int code, int out
}
}
-/* Return an instruction to move CONST_INT OPERANDS[1] into data regsiter
+/* Return an instruction to move CONST_INT OPERANDS[1] into data register
OPERANDS[0]. */
static const char *
@@ -2838,7 +2838,7 @@ notice_update_cc (rtx exp, rtx insn)
codes. Normal moves _do_ set the condition codes, but not in
a way that is appropriate for comparison with 0, because -0.0
would be treated as a negative nonzero number. Note that it
- isn't appropriate to conditionalize this restiction on
+ isn't appropriate to conditionalize this restriction on
HONOR_SIGNED_ZEROS because that macro merely indicates whether
we care about the difference between -0.0 and +0.0. */
else if (!FP_REG_P (SET_DEST (exp))
Index: ipa-inline.c
===================================================================
--- ipa-inline.c (revision 121370)
+++ ipa-inline.c (working copy)
@@ -91,11 +91,11 @@ Software Foundation, 51 Franklin Street,
maintained by pass manager). The functions after inlining are early
optimized so the early inliner sees unoptimized function itself, but
all considered callees are already optimized allowing it to unfold
- abstraction penalty on C++ effectivly and cheaply.
+ abstraction penalty on C++ effectively and cheaply.
pass_ipa_early_inlining
- With profiling, the early inlining is also neccesary to reduce
+ With profiling, the early inlining is also necessary to reduce
instrumentation costs on program with high abstraction penalty (doing
many redundant calls). This can't happen in parallel with early
optimization and profile instrumentation, because we would end up
@@ -751,7 +751,7 @@ cgraph_set_inline_failed (struct cgraph_
e->inline_failed = reason;
}
-/* Given whole compilation unit esitmate of INSNS, compute how large we can
+/* Given whole compilation unit estimate of INSNS, compute how large we can
allow the unit to grow. */
static int
compute_max_insns (int insns)
@@ -1043,7 +1043,7 @@ cgraph_decide_inlining (void)
e->caller->global.insns);
}
/* Inlining self recursive function might introduce new calls to
- thsemselves we didn't see in the loop above. Fill in the proper
+ themselves we didn't see in the loop above. Fill in the proper
reason why inline failed. */
for (e = node->callers; e; e = e->next_caller)
if (e->inline_failed)
@@ -1126,7 +1126,7 @@ cgraph_decide_inlining (void)
recursive inlining, but as an special case, we want to try harder inline
ALWAYS_INLINE functions: consider callgraph a->b->c->b, with a being
flatten, b being always inline. Flattening 'a' will collapse
- a->b->c before hitting cycle. To accomondate always inline, we however
+ a->b->c before hitting cycle. To accommodate always inline, we however
need to inline a->b->c->b.
So after hitting cycle first time, we switch into ALWAYS_INLINE mode and
@@ -1145,7 +1145,7 @@ try_inline (struct cgraph_edge *e, enum
mode yet. and the function in question is always_inline. */
if (always_inline && mode != INLINE_ALWAYS_INLINE)
mode = INLINE_ALWAYS_INLINE;
- /* Otheriwse it is time to give up. */
+ /* Otherwise it is time to give up. */
else
{
if (dump_file)
Index: tree-profile.c
===================================================================
--- tree-profile.c (revision 121370)
+++ tree-profile.c (working copy)
@@ -64,7 +64,7 @@ static GTY(()) tree ptr_void;
/* Add code:
static gcov* __gcov_indirect_call_counters; // pointer to actual counter
- static void* __gcov_indirect_call_callee; // actual callie addres
+ static void* __gcov_indirect_call_callee; // actual callee address
*/
static void
tree_init_ic_make_global_vars (void)
@@ -269,7 +269,7 @@ tree_gen_one_value_profiler (histogram_v
/* Output instructions as GIMPLE trees for code to find the most
common called function in indirect call.
- VALUE is the call expression whose indirect callie is profiled.
+ VALUE is the call expression whose indirect callee is profiled.
TAG is the tag of the section for counters, BASE is offset of the
counter position. */
@@ -308,7 +308,7 @@ tree_gen_ic_profiler (histogram_value va
/* Output instructions as GIMPLE trees for code to find the most
common called function in indirect call. Insert instructions at the
- begining of every possible called function.
+ beginning of every possible called function.
*/
static void
Index: tree-ssa-live.c
===================================================================
--- tree-ssa-live.c (revision 121370)
+++ tree-ssa-live.c (working copy)
@@ -504,7 +504,7 @@ remove_unused_locals (void)
/* Remove unused variables from REFERENCED_VARs. As an special exception
keep the variables that are believed to be aliased. Those can't be
- easilly removed from the alias sets and and operand caches.
+ easily removed from the alias sets and and operand caches.
They will be removed shortly after next may_alias pass is performed. */
FOR_EACH_REFERENCED_VAR (t, rvi)
if (!is_global_var (t)
Index: tree-ssa-math-opts.c
===================================================================
--- tree-ssa-math-opts.c (revision 121370)
+++ tree-ssa-math-opts.c (working copy)
@@ -521,9 +521,9 @@ struct tree_opt_pass pass_cse_reciprocal
0 /* letter */
};
-/* Records an occurance at statement USE_STMT in the vector of trees
+/* Records an occurrence at statement USE_STMT in the vector of trees
STMTS if it is dominated by *TOP_BB or dominates it or this basic block
- is not yet initialized. Returns true if the occurance was pushed on
+ is not yet initialized. Returns true if the occurrence was pushed on
the vector. Adjusts *TOP_BB to be the basic block dominating all
statements in the vector. */
Index: tree-ssanames.c
===================================================================
--- tree-ssanames.c (revision 121370)
+++ tree-ssanames.c (working copy)
@@ -318,7 +318,7 @@ release_dead_ssa_names (void)
referenced_var_iterator rvi;
/* Current defs point to various dead SSA names that in turn points to dead
- statements so bunch of dead memory is holded from releasing. */
+ statements so bunch of dead memory is held from releasing. */
FOR_EACH_REFERENCED_VAR (t, rvi)
set_current_def (t, NULL);
/* Now release the freelist. */
@@ -328,7 +328,7 @@ release_dead_ssa_names (void)
/* Dangling pointers might make GGC to still see dead SSA names, so it is
important to unlink the list and avoid GGC from seeing all subsequent
SSA names. In longer run we want to have all dangling pointers here
- removed (since they usually go trhough dead statements that consume
+ removed (since they usually go through dead statements that consume
considerable amounts of memory). */
TREE_CHAIN (t) = NULL_TREE;
n++;
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 121370)
+++ tree-vect-analyze.c (working copy)
@@ -164,7 +164,7 @@ vect_determine_vectorization_factor (loo
arguments (e.g. demotion, promotion), vectype will be reset
appropriately (later). Note that we have to visit the smallest
datatype in this function, because that determines the VF.
- If the samallest datatype in the loop is present only as the
+ If the smallest datatype in the loop is present only as the
rhs of a promotion operation - we'd miss it here.
However, in such a case, that a variable of this datatype
does not appear in the lhs anywhere in the loop, it shouldn't
@@ -1752,7 +1752,7 @@ vect_analyze_data_ref_access (struct dat
return false;
}
- /* Check that there is no load-store dependecies for this loads
+ /* Check that there is no load-store dependencies for this loads
to prevent a case of load-store-load to the same location. */
if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
|| DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
Index: value-prof.c
===================================================================
--- value-prof.c (revision 121370)
+++ value-prof.c (working copy)
@@ -63,7 +63,7 @@ static struct value_prof_hooks *value_pr
3) Indirect/virtual call specialization. If we can determine most
common function callee in indirect/virtual call. We can use this
- information to improve code effectivity (espetialy info for
+ information to improve code effectiveness (especially info for
inliner).
Every such optimization should add its requirements for profiled values to
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-12-22 12:50 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-12-22 12:50 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-12-22 Kazu Hirata <kazu@codesourcery.com>
* config/elfos.h, config/spu/spu.c, tree-ssa-operands.h,
tree-ssa-ter.c: Fix comment typos.
Index: config/elfos.h
===================================================================
--- config/elfos.h (revision 120147)
+++ config/elfos.h (working copy)
@@ -507,7 +507,7 @@ Boston, MA 02110-1301, USA. */
/* A C statement (sans semicolon) to output to the stdio stream STREAM
any text necessary for declaring the name of an external symbol
- named NAME whch is referenced in this compilation but not defined.
+ named NAME which is referenced in this compilation but not defined.
It is needed to properly support non-default visibility. */
#ifndef ASM_OUTPUT_EXTERNAL
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 120147)
+++ config/spu/spu.c (working copy)
@@ -1606,7 +1606,7 @@ spu_expand_prologue (void)
{
if (flag_stack_check)
{
- /* We compare agains total_size-1 because
+ /* We compare against total_size-1 because
($sp >= total_size) <=> ($sp > total_size-1) */
rtx scratch_v4si = gen_rtx_REG (V4SImode, REGNO (scratch_reg_0));
rtx sp_v4si = gen_rtx_REG (V4SImode, STACK_POINTER_REGNUM);
@@ -2386,7 +2386,7 @@ cpat_info(unsigned char *arr, int size,
}
/* OP is a CONSTANT_P. Determine what instructions can be used to load
- it into a regiser. MODE is only valid when OP is a CONST_INT. */
+ it into a register. MODE is only valid when OP is a CONST_INT. */
static enum immediate_class
classify_immediate (rtx op, enum machine_mode mode)
{
Index: tree-ssa-operands.h
===================================================================
--- tree-ssa-operands.h (revision 120147)
+++ tree-ssa-operands.h (working copy)
@@ -108,7 +108,7 @@ struct voptype_d
typedef struct voptype_d *voptype_p;
/* This structure represents a variable sized buffer which is allocated by the
- operand memory manager. Operands are subalocated out of this block. The
+ operand memory manager. Operands are suballocated out of this block. The
MEM array varies in size. */
struct ssa_operand_memory_d GTY((chain_next("%h.next")))
Index: tree-ssa-ter.c
===================================================================
--- tree-ssa-ter.c (revision 120147)
+++ tree-ssa-ter.c (working copy)
@@ -402,7 +402,7 @@ is_replaceable_p (tree stmt)
return false;
}
- /* Leave any stmt with voltile operands alone as well. */
+ /* Leave any stmt with volatile operands alone as well. */
if (stmt_ann (stmt)->has_volatile_ops)
return false;
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [patch] gcc/*: Fix comment typos.
2006-12-22 1:20 Kazu Hirata
@ 2006-12-22 1:25 ` Ian Lance Taylor
0 siblings, 0 replies; 38+ messages in thread
From: Ian Lance Taylor @ 2006-12-22 1:25 UTC (permalink / raw)
To: Kazu Hirata; +Cc: gcc-patches
Kazu Hirata <kazu@codesourcery.com> writes:
> - used by C++ frotend to explicitely mark the keyed methods.
> + used by C++ frontend to explicitely mark the keyed methods.
Hmmm, fixed one typo, missed another.
Ian
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-12-22 1:20 Kazu Hirata
2006-12-22 1:25 ` Ian Lance Taylor
0 siblings, 1 reply; 38+ messages in thread
From: Kazu Hirata @ 2006-12-22 1:20 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-12-22 Kazu Hirata <kazu@codesourcery.com>
* cgraphunit.c, config/i386/i386.c, config/ia64/ia64.c, cse.c,
output.h, toplev.c, tree-affine.h, tree-flow.h,
tree-ssa-alias.c, tree-ssa-coalesce.c, tree-ssa-live.c,
tree-ssa-live.h, tree-ssa-operands.c, tree-ssa-ter.c,
tree-vrp.c, varpool.c: Fix comment typos.
Index: cgraphunit.c
===================================================================
--- cgraphunit.c (revision 120136)
+++ cgraphunit.c (working copy)
@@ -66,7 +66,7 @@ Software Foundation, 51 Franklin Street,
data structure must be updated accordingly by this function.
There should be little need to call this function and all the references
should be made explicit to cgraph code. At present these functions are
- used by C++ frotend to explicitely mark the keyed methods.
+ used by C++ frontend to explicitely mark the keyed methods.
- analyze_expr callback
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 120136)
+++ config/i386/i386.c (working copy)
@@ -13492,7 +13492,7 @@ decide_alignment (int align,
return desired_align;
}
-/* Return thre smallest power of 2 greater than VAL. */
+/* Return the smallest power of 2 greater than VAL. */
static int
smallest_pow2_greater_than (int val)
{
@@ -13510,7 +13510,7 @@ smallest_pow2_greater_than (int val)
1) Prologue guard: Conditional that jumps up to epilogues for small
blocks that can be handled by epilogue alone. This is faster but
also needed for correctness, since prologue assume the block is larger
- than the desrired alignment.
+ than the desired alignment.
Optional dynamic check for size and libcall for large
blocks is emitted here too, with -minline-stringops-dynamically.
@@ -13834,7 +13834,7 @@ promote_duplicated_reg_to_size (rtx val,
/* Expand string clear operation (bzero). Use i386 string operations when
profitable. See expand_movmem comment for explanation of individual
- steps performd. */
+ steps performed. */
int
ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
rtx expected_align_exp, rtx expected_size_exp)
Index: config/ia64/ia64.c
===================================================================
--- config/ia64/ia64.c (revision 120136)
+++ config/ia64/ia64.c (working copy)
@@ -9182,7 +9182,7 @@ ia64_asm_output_external (FILE *file, tr
if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
{
/* maybe_assemble_visibility will return 1 if the assembler
- visibility directive is outputed. */
+ visibility directive is output. */
int need_visibility = ((*targetm.binds_local_p) (decl)
&& maybe_assemble_visibility (decl));
Index: cse.c
===================================================================
--- cse.c (revision 120136)
+++ cse.c (working copy)
@@ -5791,7 +5791,7 @@ cse_process_notes (rtx x, rtx object)
Otherwise, DATA->path is filled and the function returns TRUE indicating
that a path to follow was found.
- If FOLLOW_JUMPS is false, the maximum path lenghth is 1 and the only
+ If FOLLOW_JUMPS is false, the maximum path length is 1 and the only
block in the path will be FIRST_BB. */
static bool
@@ -6248,7 +6248,7 @@ cse_main (rtx f ATTRIBUTE_UNUSED, int nr
if (ebb_data.nsets == 0)
continue;
- /* Get a reasonable extimate for the maximum number of qty's
+ /* Get a reasonable estimate for the maximum number of qty's
needed for this path. For this, we take the number of sets
and multiply that by MAX_RECOG_OPERANDS. */
max_qty = ebb_data.nsets * MAX_RECOG_OPERANDS;
Index: output.h
===================================================================
--- output.h (revision 120136)
+++ output.h (working copy)
@@ -204,7 +204,7 @@ extern void assemble_variable (tree, int
DONT_OUTPUT_DATA is from assemble_variable. */
extern void align_variable (tree decl, bool dont_output_data);
-/* Queue for outputing something to declare an external symbol to the
+/* Queue for outputting something to declare an external symbol to the
assembler. (Most assemblers don't need this, so we normally output
nothing.) Do nothing if DECL is not external. */
extern void assemble_external (tree);
Index: toplev.c
===================================================================
--- toplev.c (revision 120136)
+++ toplev.c (working copy)
@@ -1249,7 +1249,7 @@ print_single_switch (print_switch_fn_typ
{
/* The ultrix fprintf returns 0 on success, so compute the result
we want here since we need it for the following test. The +1
- is for the seperator character that will probably be emitted. */
+ is for the separator character that will probably be emitted. */
int len = strlen (text) + 1;
if (pos != 0
Index: tree-affine.h
===================================================================
--- tree-affine.h (revision 120136)
+++ tree-affine.h (working copy)
@@ -49,7 +49,7 @@ typedef struct affine_tree_combination
TYPE, but their sizes must be the same (STRIP_NOPS is applied to the
elements).
- The coefficients are always sign extened from the precision of TYPE
+ The coefficients are always sign extended from the precision of TYPE
(regardless of signedness of TYPE). */
struct aff_comb_elt elts[MAX_AFF_ELTS];
Index: tree-flow.h
===================================================================
--- tree-flow.h (revision 120136)
+++ tree-flow.h (working copy)
@@ -288,7 +288,7 @@ struct var_ann_d GTY(())
unsigned int escape_mask;
};
-/* Contianer for variable annotation used by hashtable for annotations for
+/* Container for variable annotation used by hashtable for annotations for
static variables. */
struct static_var_ann_d GTY(())
{
Index: tree-ssa-alias.c
===================================================================
--- tree-ssa-alias.c (revision 120136)
+++ tree-ssa-alias.c (working copy)
@@ -1710,7 +1710,7 @@ setup_pointers_and_addressables (struct
{
/* The memory partition holding VAR will no longer
contain VAR, and statements referencing it will need
- to be udpated. */
+ to be updated. */
if (memory_partition (var))
mark_sym_for_renaming (memory_partition (var));
Index: tree-ssa-coalesce.c
===================================================================
--- tree-ssa-coalesce.c (revision 120136)
+++ tree-ssa-coalesce.c (working copy)
@@ -182,7 +182,7 @@ coalesce_pair_map_hash (const void *pair
/* Equality function for coalesce list hash table. Compare PAIR1 and PAIR2,
- returning TRUE if the two pairs are equivilent. */
+ returning TRUE if the two pairs are equivalent. */
static int
coalesce_pair_map_eq (const void *pair1, const void *pair2)
@@ -309,7 +309,7 @@ add_coalesce (coalesce_list_p cl, int p1
}
-/* Comparison function to allow qsort to sort P1 and P2 in Ascendiong order. */
+/* Comparison function to allow qsort to sort P1 and P2 in Ascending order. */
static int
compare_pairs (const void *p1, const void *p2)
@@ -355,7 +355,7 @@ end_coalesce_pair_p (coalesce_pair_itera
}
-/* Return the next parttition pair to be visited by ITER. */
+/* Return the next partition pair to be visited by ITER. */
static inline coalesce_pair_p
next_coalesce_pair (coalesce_pair_iterator *iter)
@@ -466,7 +466,7 @@ dump_coalesce_list (FILE *f, coalesce_li
/* This represents a conflict graph. Implemented as an array of bitmaps.
- A full matrix isused for conflicts rather than just upper triangular form.
+ A full matrix is used for conflicts rather than just upper triangular form.
this makes it much simpler and faster to perform conflict merges. */
typedef struct ssa_conflicts_d
@@ -787,9 +787,9 @@ live_track_clear_base_vars (live_track_p
/* Build a conflict graph based on LIVEINFO. Any partitions which are in the
- partition view of the var_map liveinfo is based on get entires in the
+ partition view of the var_map liveinfo is based on get entries in the
conflict graph. Only conflicts between ssa_name partitions with the same
- base variableare added. */
+ base variable are added. */
static ssa_conflicts_p
build_ssa_conflict_graph (tree_live_info_p liveinfo)
@@ -1140,7 +1140,7 @@ create_outofssa_var_map (coalesce_list_p
}
-/* Attempt to coalesce ssa verisons X and Y together using the partition
+/* Attempt to coalesce ssa versions X and Y together using the partition
mapping in MAP and checking conflicts in GRAPH. Output any debug info to
DEBUG, if it is non-NULL. */
@@ -1219,8 +1219,8 @@ coalesce_partitions (var_map map, ssa_co
edge e;
edge_iterator ei;
- /* First, coalece all the copie across abnormal edges. These are not placed
- in the coalesce list becase they do not need to be sorted, and simply
+ /* First, coalesce all the copies across abnormal edges. These are not placed
+ in the coalesce list because they do not need to be sorted, and simply
consume extra memory/compilation time in large programs. */
FOR_EACH_BB (bb)
Index: tree-ssa-live.c
===================================================================
--- tree-ssa-live.c (revision 120136)
+++ tree-ssa-live.c (working copy)
@@ -560,7 +560,7 @@ delete_tree_live_info (tree_live_info_p
/* Visit basic block BB and propagate any required live on entry bits from
LIVE into the predecessors. VISITED is the bitmap of visited blocks.
- TMP is a temporary work bitmap which is passed in to avoid reallocting
+ TMP is a temporary work bitmap which is passed in to avoid reallocating
it each time. */
static void
@@ -602,7 +602,7 @@ loe_visit_block (tree_live_info_p live,
/* Using LIVE, fill in all the live-on-entry blocks between the defs and uses
- of all the vairables. */
+ of all the variables. */
static void
live_worklist (tree_live_info_p live)
@@ -631,7 +631,7 @@ live_worklist (tree_live_info_p live)
}
-/* Calulate the initial live on entry vector for SSA_NAME using immediate_use
+/* Calculate the initial live on entry vector for SSA_NAME using immediate_use
links. Set the live on entry fields in LIVE. Def's are marked temporarily
in the liveout vector. */
Index: tree-ssa-live.h
===================================================================
--- tree-ssa-live.h (revision 120136)
+++ tree-ssa-live.h (working copy)
@@ -31,8 +31,8 @@ Boston, MA 02110-1301, USA. */
/* Used to create the variable mapping when we go out of SSA form.
Mapping from an ssa_name to a partition number is maintained, as well as
- partition number to back to ssa_name. A parition can also be represented
- by a non-ssa_name variable. This allows ssa_names and thier partition to
+ partition number to back to ssa_name. A partition can also be represented
+ by a non-ssa_name variable. This allows ssa_names and their partition to
be coalesced with live on entry compiler variables, as well as eventually
having real compiler variables assigned to each partition as part of the
final stage of going out of ssa.
@@ -43,7 +43,7 @@ Boston, MA 02110-1301, USA. */
partitions. This allows the coalescer to decide what partitions are
interesting to it, and only work with those partitions. Whenever the view
is changed, the partition numbers change, but none of the partition groupings
- change. (ie, it is truly a view since it doesnt change anything)
+ change. (ie, it is truly a view since it doesn't change anything)
The final component of the data structure is the basevar map. This provides
a list of all the different base variables which occur in a partition view,
Index: tree-ssa-operands.c
===================================================================
--- tree-ssa-operands.c (revision 120136)
+++ tree-ssa-operands.c (working copy)
@@ -468,8 +468,8 @@ ssa_operand_alloc (unsigned size)
if (size > ssa_operand_mem_size)
ssa_operand_mem_size = OP_SIZE_3 * sizeof (struct voptype_d);
- /* Fail if there is not enough space. If thre are this many operands
- required, first make sure there isn't a different probem causing this
+ /* Fail if there is not enough space. If there are this many operands
+ required, first make sure there isn't a different problem causing this
many operands. If the decision is that this is OK, then we can
specially allocate a buffer just for this request. */
gcc_assert (size <= ssa_operand_mem_size);
@@ -607,7 +607,7 @@ add_use_op (tree stmt, tree *op, use_opt
/* Return a virtual op pointer with NUM elements which are all initialized to OP
- and are linked into the immeidate uses for STMT. The new vop is appended
+ and are linked into the immediate uses for STMT. The new vop is appended
after PREV. */
static inline voptype_p
Index: tree-ssa-ter.c
===================================================================
--- tree-ssa-ter.c (revision 120136)
+++ tree-ssa-ter.c (working copy)
@@ -80,13 +80,13 @@ Boston, MA 02110-1301, USA. */
v_9 = a_2 * n_12
<...>
- If b_5, b_8 and b_14 are all colaesced together...
+ If b_5, b_8 and b_14 are all coalesced together...
The expression b_5 + 6 CANNOT replace the use in the statement defining v_9
because b_8 is in fact killing the value of b_5 since they share a partition
- and will be assigned the same memory or regster location.
+ and will be assigned the same memory or register location.
TER implements this but stepping through the instructions in a block and
- tracking potential expressions for replacement, and the paritions they are
+ tracking potential expressions for replacement, and the partitions they are
dependent on. Expressions are represented by the SSA_NAME_VERSION of the
DEF on the LHS of a GIMPLE_MODIFY_STMT and the expression is the RHS.
@@ -110,8 +110,8 @@ Boston, MA 02110-1301, USA. */
an expression from the partition kill lists when a decision is made whether
to replace it or not. This is indexed by ssa version number as well, and
indicates a partition number. virtual operands are not tracked individually,
- but they are summarized by an artifical partition called VIRTUAL_PARTITION.
- This means a MAY or MUST def will kill *ALL* expressions that are dependant
+ but they are summarized by an artificial partition called VIRTUAL_PARTITION.
+ This means a MAY or MUST def will kill *ALL* expressions that are dependent
on a virtual operand.
Note that the EXPR_DECL_UID and this bitmap represent very similar
information, but the info in one is not easy to obtain from the other.
@@ -121,11 +121,11 @@ Boston, MA 02110-1301, USA. */
longer be valid if a definition into this partition takes place.
PARTITION_IN_USE is simply a bitmap which is used to track which partitions
- currently have sokmething in their kill list. This is used at the end of
+ currently have something in their kill list. This is used at the end of
a block to clear out the KILL_LIST bitmaps at the end of each block.
NEW_REPLACEABLE_DEPENDENCIES is used as a temporary place to store
- dependencies which will be reused by the current defintion. ALl the uses
+ dependencies which will be reused by the current definition. All the uses
on an expression are processed before anything else is done. If a use is
determined to be a replaceable expression AND the current stmt is also going
to be replaceable, all the dependencies of this replaceable use will be
@@ -161,8 +161,8 @@ typedef struct temp_expr_table_d
tree *replaceable_expressions; /* Replacement expression table. */
bitmap *expr_decl_uids; /* Base uids of exprs. */
bitmap *kill_list; /* Expr's killed by a partition. */
- int virtual_partition; /* Psuedo partition for virtual ops. */
- bitmap partition_in_use; /* Partitions with kill entires. */
+ int virtual_partition; /* Pseudo partition for virtual ops. */
+ bitmap partition_in_use; /* Partitions with kill entries. */
bitmap new_replaceable_dependencies; /* Holding place for pending dep's. */
int *num_in_part; /* # of ssa_names in a partition. */
} *temp_expr_table_p;
@@ -256,7 +256,7 @@ version_to_be_replaced_p (temp_expr_tabl
}
-/* Add partition P to the list if partititons VERSION is dependent on. TAB is
+/* Add partition P to the list if partitions VERSION is dependent on. TAB is
the expression table */
static inline void
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 120136)
+++ tree-vrp.c (working copy)
@@ -626,8 +626,8 @@ compare_values (tree val1, tree val2)
/* If VAL1 is different than VAL2, return +2.
For integer constants we either have already returned -1 or 1
- or they are equivalent. We still might suceed prove something
- about non-trivial operands. */
+ or they are equivalent. We still might succeed in proving
+ something about non-trivial operands. */
if (TREE_CODE (val1) != INTEGER_CST
|| TREE_CODE (val2) != INTEGER_CST)
{
Index: varpool.c
===================================================================
--- varpool.c (revision 120136)
+++ varpool.c (working copy)
@@ -40,7 +40,7 @@ Software Foundation, 51 Franklin Street,
and drives the decision process on what variables and when are
going to be compiled.
- The varpool nodes are alocated lazilly for declarations
+ The varpool nodes are allocated lazily for declarations
either by frontend or at callgraph construction time.
All variables supposed to be output into final file needs to be
explicitely marked by frontend via VARPOOL_FINALIZE_DECL function. */
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-12-05 8:27 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-12-05 8:27 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-12-05 Kazu Hirata <kazu@codesourcery.com>
* config/i386/darwin.h, config/spu/spu.c, tree-ssa-live.c,
tree-vect-generic.c, tree-vect-transform.c: Fix comment typos.
Index: config/i386/darwin.h
===================================================================
--- config/i386/darwin.h (revision 119532)
+++ config/i386/darwin.h (working copy)
@@ -188,7 +188,7 @@ extern void darwin_x86_file_end (void);
} while (0)
/* Darwin on x86_64 uses dwarf-2 by default. Pre-darwin9 32-bit
- compiles defaut to stabs+. darwin9+ defaults to dwarf-2. */
+ compiles default to stabs+. darwin9+ defaults to dwarf-2. */
#ifndef DARWIN_PREFER_DWARF
#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE (TARGET_64BIT ? DWARF2_DEBUG : DBX_DEBUG)
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 119532)
+++ config/spu/spu.c (working copy)
@@ -3707,7 +3707,7 @@ spu_rtx_costs (rtx x, int code, int oute
/* Folding to a CONST_VECTOR will use extra space but there might
be only a small savings in cycles. We'd like to use a CONST_VECTOR
- only if it allows us to fold away multiple insns. Changin the cost
+ only if it allows us to fold away multiple insns. Changing the cost
of a CONST_VECTOR here (or in CONST_COSTS) doesn't help though
because this cost will only be compared against a single insn.
if (code == CONST_VECTOR)
Index: tree-ssa-live.c
===================================================================
--- tree-ssa-live.c (revision 119532)
+++ tree-ssa-live.c (working copy)
@@ -984,7 +984,7 @@ partition_pair_map_hash (const void *pai
}
-/* Return TRUE if PAIR1 is equivilent to PAIR2. */
+/* Return TRUE if PAIR1 is equivalent to PAIR2. */
int
partition_pair_map_eq (const void *pair1, const void *pair2)
@@ -1112,7 +1112,7 @@ add_coalesce (coalesce_list_p cl, int p1
}
-/* Comparison function to allow qsort to sort P1 and P2 in Ascendiong order. */
+/* Comparison function to allow qsort to sort P1 and P2 in Ascending order. */
static
int compare_pairs (const void *p1, const void *p2)
Index: tree-vect-generic.c
===================================================================
--- tree-vect-generic.c (revision 119532)
+++ tree-vect-generic.c (working copy)
@@ -411,7 +411,7 @@ expand_vector_operations_1 (block_stmt_i
gcc_assert (code != CONVERT_EXPR);
op = optab_for_tree_code (code, type);
- /* For widening/narrowgin vector operations, the relevant type is of the
+ /* For widening/narrowing vector operations, the relevant type is of the
arguments, not the widened result. */
if (code == WIDEN_SUM_EXPR
|| code == VEC_WIDEN_MULT_HI_EXPR
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 119532)
+++ tree-vect-transform.c (working copy)
@@ -3852,7 +3852,7 @@ vect_transform_stmt (tree stmt, block_st
{
/* In case of interleaving, the whole chain is vectorized when the
last store in the chain is reached. Store stmts before the last
- one are skipped, and there vec_stmt_info shoudn't be freed
+ one are skipped, and there vec_stmt_info shouldn't be freed
meanwhile. */
*strided_store = true;
if (STMT_VINFO_VEC_STMT (stmt_info))
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [patch] gcc/*: Fix comment typos.
2006-12-02 2:26 Kazu Hirata
@ 2006-12-02 14:26 ` Rask Ingemann Lambertsen
0 siblings, 0 replies; 38+ messages in thread
From: Rask Ingemann Lambertsen @ 2006-12-02 14:26 UTC (permalink / raw)
To: Kazu Hirata; +Cc: gcc-patches
On Fri, Dec 01, 2006 at 06:26:16PM -0800, Kazu Hirata wrote:
> Hi,
>
> Committed as obvious.
> Index: config/sh/sh.c
> ===================================================================
> --- config/sh/sh.c (revision 119436)
> +++ config/sh/sh.c (working copy)
> @@ -2413,7 +2413,7 @@ sh_rtx_costs (rtx x, int code, int outer
> && CONST_OK_FOR_K08 (INTVAL (x)))
> *total = 1;
> /* prepare_cmp_insn will force costly constants int registers before
> - the cbrach[sd]i4 pattterns can see them, so preserve potentially
> + the cbrach[sd]i4 patterns can see them, so preserve potentially
> interesting ones not covered by I08 above. */
> else if (outer_code == COMPARE
> && ((unsigned HOST_WIDE_INT) INTVAL (x)
Was that supposed to be cbranch* instead of cbrach*?
> @@ -2440,7 +2440,7 @@ sh_rtx_costs (rtx x, int code, int outer
> if (TARGET_SHMEDIA)
> *total = COSTS_N_INSNS (4);
> /* prepare_cmp_insn will force costly constants int registers before
> - the cbrachdi4 patttern can see them, so preserve potentially
> + the cbrachdi4 pattern can see them, so preserve potentially
> interesting ones. */
> else if (outer_code == COMPARE && GET_MODE (x) == DImode)
> *total = 1;
Likewise.
--
Rask Ingemann Lambertsen
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-12-02 2:26 Kazu Hirata
2006-12-02 14:26 ` Rask Ingemann Lambertsen
0 siblings, 1 reply; 38+ messages in thread
From: Kazu Hirata @ 2006-12-02 2:26 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-12-02 Kazu Hirata <kazu@codesourcery.com>
* builtins.c, cfgloop.h, cgraph.h, config/arm/arm.c,
config/i386/i386.c, config/i386/i386.h, config/mips/mips.h,
config/rs6000/cell.md, config/rs6000/rs6000.c, config/sh/sh.c,
config/sh/sh4-300.md, config/spu/spu-builtins.def,
config/spu/spu-c.c, config/spu/spu-modes.def,
config/spu/spu.c, config/spu/spu.md,
config/spu/spu_internals.h, config/spu/vmx2spu.h,
fold-const.c, fwprop.c, predict.c, tree-data-ref.h,
tree-flow.h, tree-ssa-loop-manip.c, tree-ssa-loop-niter.c,
tree-ssa-pre.c, tree-vect-analyze.c, tree-vect-transform.c,
tree-vectorizer.c, tree-vrp.c: Fix comment typos. Follow
spelling conventions.
Index: builtins.c
===================================================================
--- builtins.c (revision 119436)
+++ builtins.c (working copy)
@@ -554,7 +554,7 @@ expand_builtin_return_addr (enum built_i
override us. Therefore frame pointer elimination is OK, and using
the soft frame pointer is OK.
- For a non-zero count, or a zero count with __builtin_frame_address,
+ For a nonzero count, or a zero count with __builtin_frame_address,
we require a stable offset from the current frame pointer to the
previous one, so we must use the hard frame pointer, and
we must disable frame pointer elimination. */
@@ -11495,7 +11495,7 @@ init_target_chars (void)
/* Helper function for do_mpfr_arg*(). Ensure M is a normal number
and no overflow/underflow occurred. INEXACT is true if M was not
- exacly calculated. TYPE is the tree type for the result. This
+ exactly calculated. TYPE is the tree type for the result. This
function assumes that you cleared the MPFR flags and then
calculated M to see if anything subsequently set a flag prior to
entering this function. Return NULL_TREE if any checks fail. */
Index: cfgloop.h
===================================================================
--- cfgloop.h (revision 119436)
+++ cfgloop.h (working copy)
@@ -143,7 +143,7 @@ struct loop
struct nb_iter_bound *bounds;
/* If not NULL, loop has just single exit edge stored here (edges to the
- EXIT_BLOCK_PTR do not count. Do not use direcly, this field should
+ EXIT_BLOCK_PTR do not count. Do not use directly; this field should
only be accessed via single_exit/set_single_exit functions. */
edge single_exit_;
Index: cgraph.h
===================================================================
--- cgraph.h (revision 119436)
+++ cgraph.h (working copy)
@@ -51,7 +51,7 @@ enum availability
struct cgraph_local_info GTY(())
{
- /* Estiimated stack frame consumption by the function. */
+ /* Estimated stack frame consumption by the function. */
HOST_WIDE_INT estimated_self_stack_size;
/* Size of the function before inlining. */
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 119436)
+++ config/arm/arm.c (working copy)
@@ -394,7 +394,7 @@ rtx arm_compare_op0, arm_compare_op1;
/* The processor for which instructions should be scheduled. */
enum processor_type arm_tune = arm_none;
-/* The default processor used if not overriden by commandline. */
+/* The default processor used if not overridden by commandline. */
static enum processor_type arm_default_cpu = arm_none;
/* Which floating point model to use. */
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 119436)
+++ config/i386/i386.c (working copy)
@@ -530,7 +530,7 @@ struct processor_costs athlon_cost = {
COSTS_N_INSNS (2), /* cost of FCHS instruction. */
COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
/* For some reason, Athlon deals better with REP prefix (relative to loops)
- comopared to K8. Alignment becomes important after 8 bytes for mempcy and
+ compared to K8. Alignment becomes important after 8 bytes for mempcy and
128 bytes for memset. */
{{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
DUMMY_STRINGOP_ALGS},
@@ -13171,7 +13171,7 @@ expand_movmem_epilogue (rtx destmem, rtx
/* When there are stringops, we can cheaply increase dest and src pointers.
Otherwise we save code size by maintaining offset (zero is readily
- available from preceeding rep operation) and using x86 addressing modes.
+ available from preceding rep operation) and using x86 addressing modes.
*/
if (TARGET_SINGLE_STRINGOP)
{
@@ -13621,7 +13621,7 @@ ix86_expand_movmem (rtx dst, rtx src, rt
if (GET_CODE (align_exp) == CONST_INT)
align = INTVAL (align_exp);
- /* i386 can do missaligned access on resonably increased cost. */
+ /* i386 can do misaligned access on reasonably increased cost. */
if (GET_CODE (expected_align_exp) == CONST_INT
&& INTVAL (expected_align_exp) > align)
align = INTVAL (expected_align_exp);
@@ -13783,7 +13783,7 @@ ix86_expand_movmem (rtx dst, rtx src, rt
dst = change_address (dst, BLKmode, destreg);
}
- /* Epologue to copy the remaining bytes. */
+ /* Epilogue to copy the remaining bytes. */
if (label)
{
if (size_needed < desired_align - align)
@@ -13909,7 +13909,7 @@ ix86_expand_setmem (rtx dst, rtx count_e
if (GET_CODE (align_exp) == CONST_INT)
align = INTVAL (align_exp);
- /* i386 can do missaligned access on resonably increased cost. */
+ /* i386 can do misaligned access on reasonably increased cost. */
if (GET_CODE (expected_align_exp) == CONST_INT
&& INTVAL (expected_align_exp) > align)
align = INTVAL (expected_align_exp);
Index: config/i386/i386.h
===================================================================
--- config/i386/i386.h (revision 119436)
+++ config/i386/i386.h (working copy)
@@ -1494,7 +1494,7 @@ typedef struct ix86_args {
int warn_mmx; /* True when we want to warn about MMX ABI. */
int maybe_vaarg; /* true for calls to possibly vardic fncts. */
int float_in_x87; /* 1 if floating point arguments should
- be passed in 80387 registere. */
+ be passed in 80387 registers. */
int float_in_sse; /* 1 if in 32-bit mode SFmode (2 for DFmode) should
be passed in SSE registers. Otherwise 0. */
} CUMULATIVE_ARGS;
Index: config/mips/mips.h
===================================================================
--- config/mips/mips.h (revision 119436)
+++ config/mips/mips.h (working copy)
@@ -576,7 +576,7 @@ extern const struct mips_rtx_cost_data *
been generated up to this point. */
#define ISA_HAS_BRANCHLIKELY (!ISA_MIPS1)
-/* ISA has a three-operand multiplcation instruction (usually spelt "mul"). */
+/* ISA has a three-operand multiplication instruction (usually spelt "mul"). */
#define ISA_HAS_MUL3 ((TARGET_MIPS3900 \
|| TARGET_MIPS5400 \
|| TARGET_MIPS5500 \
Index: config/rs6000/cell.md
===================================================================
--- config/rs6000/cell.md (revision 119436)
+++ config/rs6000/cell.md (working copy)
@@ -21,10 +21,10 @@
;; Sources: BE BOOK4 (/sfs/enc/doc/PPU_BookIV_DD3.0_latest.pdf)
-;; BE Architechture *DD3.0 and DD3.1*
+;; BE Architecture *DD3.0 and DD3.1*
;; This file simulate PPU processor unit backend of pipeline, maualP24.
;; manual P27, stall and flush points
-;; IU, XU, VSU, dipatcher decodes and dispatch 2 insns per cycle in program
+;; IU, XU, VSU, dispatcher decodes and dispatch 2 insns per cycle in program
;; order, the grouped addresses are aligned by 8
;; This file only simulate one thread situation
;; XU executes all fixed point insns(3 units, a simple alu, a complex unit,
@@ -43,7 +43,7 @@
;;VMX(perm,vsu_ls, fp_ls) X
;; X are illegal combination.
-;; Dual issue exceptons:
+;; Dual issue exceptions:
;;(1) nop-pipelined FXU instr in slot 0
;;(2) non-pipelined FPU inst in slot 0
;; CSI instr(context-synchronizing insn)
@@ -51,7 +51,7 @@
;; BRU unit: bru(none register stall), bru_cr(cr register stall)
;; VSU unit: vus(vmx simple), vup(vmx permute), vuc(vmx complex),
-;; vuf(vmx float), fpu(floats). fpu_div is hypthetical, it is for
+;; vuf(vmx float), fpu(floats). fpu_div is hypothetical, it is for
;; nonpipelined simulation
;; micr insns will stall at least 7 cycles to get the first instr from ROM,
;; micro instructions are not dual issued.
@@ -378,7 +378,7 @@ (define_bypass 3 "cell-vecfloat" "cell-v
; this is not correct,
;; this is a stall in general and not dependent on result
(define_bypass 13 "cell-vecstore" "cell-fpstore")
-; this is not correct, this can never be true, not depent on result
+; this is not correct, this can never be true, not dependent on result
(define_bypass 7 "cell-fp" "cell-fpload")
;; vsu1 should avoid writing to the same target register as vsu2 insn
;; within 12 cycles.
@@ -396,6 +396,6 @@ (define_bypass 10 "cell-mtjmpr" "cell-br
;;Things are not simulated:
;; update instruction, update address gpr are not simulated
-;; vrefp, vrsqrtefp have latency(14), currently simluated as 12 cycle float
+;; vrefp, vrsqrtefp have latency(14), currently simulated as 12 cycle float
;; insns
Index: config/rs6000/rs6000.c
===================================================================
--- config/rs6000/rs6000.c (revision 119436)
+++ config/rs6000/rs6000.c (working copy)
@@ -17557,7 +17557,7 @@ rs6000_sched_reorder2 (FILE *dump, int s
cycle and we attempt to locate another load in the ready list to
issue with it.
- - If the pedulum is -2, then two stores have already been
+ - If the pendulum is -2, then two stores have already been
issued in this cycle, so we increase the priority of the first load
in the ready list to increase it's likelihood of being chosen first
in the next cycle.
Index: config/sh/sh.c
===================================================================
--- config/sh/sh.c (revision 119436)
+++ config/sh/sh.c (working copy)
@@ -1416,7 +1416,7 @@ prepare_cbranch_operands (rtx *operands,
compare r0. Hence, if operands[1] has to be loaded from somewhere else
into a register, that register might as well be r0, and we allow the
constant. If it is already in a register, this is likely to be
- allocatated to a different hard register, thus we load the constant into
+ allocated to a different hard register, thus we load the constant into
a register unless it is zero. */
if (!REG_P (operands[2])
&& (GET_CODE (operands[2]) != CONST_INT
@@ -1468,7 +1468,7 @@ expand_cbranchsi4 (rtx *operands, enum r
operation should be EQ or NE.
- If items are searched in an ordered tree from the root, we can expect
the highpart to be unequal about half of the time; operation should be
- an unequality comparison, operands non-constant, and overall probability
+ an inequality comparison, operands non-constant, and overall probability
about 50%. Likewise for quicksort.
- Range checks will be often made against constants. Even if we assume for
simplicity an even distribution of the non-constant operand over a
@@ -2413,7 +2413,7 @@ sh_rtx_costs (rtx x, int code, int outer
&& CONST_OK_FOR_K08 (INTVAL (x)))
*total = 1;
/* prepare_cmp_insn will force costly constants int registers before
- the cbrach[sd]i4 pattterns can see them, so preserve potentially
+ the cbrach[sd]i4 patterns can see them, so preserve potentially
interesting ones not covered by I08 above. */
else if (outer_code == COMPARE
&& ((unsigned HOST_WIDE_INT) INTVAL (x)
@@ -2440,7 +2440,7 @@ sh_rtx_costs (rtx x, int code, int outer
if (TARGET_SHMEDIA)
*total = COSTS_N_INSNS (4);
/* prepare_cmp_insn will force costly constants int registers before
- the cbrachdi4 patttern can see them, so preserve potentially
+ the cbrachdi4 pattern can see them, so preserve potentially
interesting ones. */
else if (outer_code == COMPARE && GET_MODE (x) == DImode)
*total = 1;
Index: config/sh/sh4-300.md
===================================================================
--- config/sh/sh4-300.md (revision 119436)
+++ config/sh/sh4-300.md (working copy)
@@ -189,7 +189,7 @@ (define_insn_reservation "sh4_300_ocbwb"
;; In most cases, the insn that loads the address of the call should have
;; a non-zero latency (mov rn,rm doesn't make sense since we could use rn
;; for the address then). Thus, a preceding insn that can be paired with
-;; a call should be elegible for the delay slot.
+;; a call should be eligible for the delay slot.
;;
;; calls introduce a longisch delay that is likely to flush the pipelines
;; of the caller's instructions. Ordinary functions tend to end with a
Index: config/spu/spu-builtins.def
===================================================================
--- config/spu/spu-builtins.def (revision 119436)
+++ config/spu/spu-builtins.def (working copy)
@@ -1,4 +1,4 @@
-/* Definitions of builtin fuctions for the Synergistic Processing Unit (SPU). */
+/* Definitions of builtin functions for the Synergistic Processing Unit (SPU). */
/* Copyright (C) 2006 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it under
@@ -24,8 +24,8 @@
#define _A3(a,b,c) {a, b, c, SPU_BTI_END_OF_PARAMS}
#define _A4(a,b,c,d) {a, b, c, d, SPU_BTI_END_OF_PARAMS}
-/* definitions to support si intrinisic functions: (These and other builtin
- * definitions must preceed definitions of the overloaded generic intrinsics */
+/* definitions to support si intrinsic functions: (These and other builtin
+ * definitions must precede definitions of the overloaded generic intrinsics */
DEF_BUILTIN (SI_LQD, CODE_FOR_spu_lqd, "si_lqd", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_S10_4))
DEF_BUILTIN (SI_LQX, CODE_FOR_spu_lqx, "si_lqx", B_INSN, _A3(SPU_BTI_QUADWORD, SPU_BTI_QUADWORD, SPU_BTI_QUADWORD))
@@ -701,10 +701,10 @@ DEF_BUILTIN (SPU_PROMOTE_7, CODE_
DEF_BUILTIN (SPU_PROMOTE_8, CODE_FOR_spu_promote, "spu_promote_8", B_INTERNAL, _A3(SPU_BTI_V4SF, SPU_BTI_FLOAT, SPU_BTI_INTSI))
DEF_BUILTIN (SPU_PROMOTE_9, CODE_FOR_spu_promote, "spu_promote_9", B_INTERNAL, _A3(SPU_BTI_V2DF, SPU_BTI_DOUBLE, SPU_BTI_INTSI))
-/* We need something that is not B_INTERNAL as a sentinal. */
+/* We need something that is not B_INTERNAL as a sentinel. */
-/* These are for the convenience of imlpemnting fma() in the standard
- libraries. */
+/* These are for the convenience of implementing fma() in the standard
+ libraries. */
DEF_BUILTIN (SCALAR_FMA, CODE_FOR_fma_sf, "fmas", B_INSN, _A4(SPU_BTI_FLOAT, SPU_BTI_FLOAT, SPU_BTI_FLOAT, SPU_BTI_FLOAT))
DEF_BUILTIN (SCALAR_DFMA, CODE_FOR_fma_df, "dfmas", B_INSN, _A4(SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE, SPU_BTI_DOUBLE))
Index: config/spu/spu-c.c
===================================================================
--- config/spu/spu-c.c (revision 119436)
+++ config/spu/spu-c.c (working copy)
@@ -72,7 +72,7 @@ spu_resolve_overloaded_builtin (tree fnd
struct spu_builtin_description *desc;
tree match = NULL_TREE;
- /* The vector types are not available if the backend is not initalized */
+ /* The vector types are not available if the backend is not initialized. */
gcc_assert (!flag_preprocess_only);
desc = &spu_builtins[fcode];
Index: config/spu/spu-modes.def
===================================================================
--- config/spu/spu-modes.def (revision 119436)
+++ config/spu/spu-modes.def (working copy)
@@ -25,8 +25,8 @@ VECTOR_MODES (INT, 16); /* V16QI V
VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
-/* A special mode for the intr regsister so we can treat it differently
- for conditional moves. */
+/* A special mode for the intr register so we can treat it differently
+ for conditional moves. */
RANDOM_MODE (INTR);
/* cse_insn needs an INT_MODE larger than WORD_MODE, otherwise some
Index: config/spu/spu.c
===================================================================
--- config/spu/spu.c (revision 119436)
+++ config/spu/spu.c (working copy)
@@ -322,7 +322,7 @@ valid_subreg (rtx op)
}
/* When insv and ext[sz]v ar passed a TI SUBREG, we want to strip it off
- and ajust the start offset. */
+ and adjust the start offset. */
static rtx
adjust_operand (rtx op, HOST_WIDE_INT * start)
{
@@ -1651,8 +1651,8 @@ int spu_hint_dist = (8 * 4);
/* An array of these is used to propagate hints to predecessor blocks. */
struct spu_bb_info
{
- rtx prop_jump; /* propogated from another block */
- basic_block bb; /* the orignal block. */
+ rtx prop_jump; /* propagated from another block */
+ basic_block bb; /* the original block. */
};
/* The special $hbr register is used to prevent the insn scheduler from
@@ -2455,7 +2455,7 @@ spu_legitimate_address (enum machine_mod
}
/* When the address is reg + const_int, force the const_int into a
- regiser. */
+ register. */
rtx
spu_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
enum machine_mode mode)
@@ -2697,7 +2697,7 @@ spu_pass_by_reference (CUMULATIVE_ARGS *
} va_list[1];
- wheare __args points to the arg that will be returned by the next
+ where __args points to the arg that will be returned by the next
va_arg(), and __skip points to the previous stack frame such that
when __args == __skip we should advance __args by 32 bytes. */
static tree
@@ -2913,8 +2913,8 @@ spu_conditional_register_usage (void)
aligned. Taking into account that CSE might replace this reg with
another one that has not been marked aligned.
So this is really only true for frame, stack and virtual registers,
- which we know are always aligned and should not be adversly effected
- by CSE. */
+ which we know are always aligned and should not be adversely affected
+ by CSE. */
static int
regno_aligned_for_load (int regno)
{
@@ -2981,7 +2981,7 @@ store_with_one_insn_p (rtx mem)
if (GET_CODE (addr) == SYMBOL_REF)
{
/* We use the associated declaration to make sure the access is
- refering to the whole object.
+ referring to the whole object.
We check both MEM_EXPR and and SYMBOL_REF_DECL. I'm not sure
if it is necessary. Will there be cases where one exists, and
the other does not? Will there be cases where both exist, but
@@ -3426,8 +3426,8 @@ mem_is_padded_component_ref (rtx x)
if (GET_MODE (x) != TYPE_MODE (TREE_TYPE (t)))
return 0;
/* If there are no following fields then the field alignment assures
- the structure is padded to the alignement which means this field is
- padded too. */
+ the structure is padded to the alignment which means this field is
+ padded too. */
if (TREE_CHAIN (t) == 0)
return 1;
/* If the following field is also aligned then this field will be
Index: config/spu/spu.md
===================================================================
--- config/spu/spu.md (revision 119436)
+++ config/spu/spu.md (working copy)
@@ -1178,8 +1178,8 @@ (define_insn "mpyu_si"
[(set_attr "type" "fp7")])
;; This isn't always profitable to use. Consider r = a * b + c * d.
-;; It's faster to do the multplies in parallel then add them. If we
-;; merge a multply and add it prevents the multplies from happening in
+;; It's faster to do the multiplies in parallel then add them. If we
+;; merge a multiply and add it prevents the multiplies from happening in
;; parallel.
(define_insn "mpya_si"
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
Index: config/spu/spu_internals.h
===================================================================
--- config/spu/spu_internals.h (revision 119436)
+++ config/spu/spu_internals.h (working copy)
@@ -256,7 +256,7 @@
#define __align_hint(ptr,base,offset) __builtin_spu_align_hint(ptr,base,offset)
-/* generic spu_* intrinisics */
+/* generic spu_* intrinsics */
#define spu_splats(scalar) __builtin_spu_splats(scalar)
#define spu_convtf(ra,imm) __builtin_spu_convtf(ra,imm)
Index: config/spu/vmx2spu.h
===================================================================
--- config/spu/vmx2spu.h (revision 119436)
+++ config/spu/vmx2spu.h (working copy)
@@ -2155,7 +2155,7 @@ static inline vec_int4 vec_subs(vec_int4
}
-/* vec_sum4s (vector sum across partial (1/4) staturated)
+/* vec_sum4s (vector sum across partial (1/4) saturated)
* =========
*/
static inline vec_uint4 vec_sum4s(vec_uchar16 a, vec_uint4 b)
@@ -2187,7 +2187,7 @@ static inline vec_int4 vec_sum4s(vec_sho
}
-/* vec_sum2s (vector sum across partial (1/2) staturated)
+/* vec_sum2s (vector sum across partial (1/2) saturated)
* =========
*/
static inline vec_int4 vec_sum2s(vec_int4 a, vec_int4 b)
@@ -2223,7 +2223,7 @@ static inline vec_int4 vec_sum2s(vec_int
}
-/* vec_sums (vector sum staturated)
+/* vec_sums (vector sum saturated)
* ========
*/
static inline vec_int4 vec_sums(vec_int4 a, vec_int4 b)
@@ -2909,7 +2909,7 @@ static inline int vec_all_ne(vec_float4
}
-/* vec_all_nge (all elements not greater than or eqaul)
+/* vec_all_nge (all elements not greater than or equal)
* ===========
*/
static inline int vec_all_nge(vec_float4 a, vec_float4 b)
@@ -3385,7 +3385,7 @@ static inline int vec_any_ne(vec_float4
}
-/* vec_any_nge (any elements not greater than or eqaul)
+/* vec_any_nge (any elements not greater than or equal)
* ===========
*/
static inline int vec_any_nge(vec_float4 a, vec_float4 b)
Index: fold-const.c
===================================================================
--- fold-const.c (revision 119436)
+++ fold-const.c (working copy)
@@ -7818,7 +7818,7 @@ maybe_canonicalize_comparison_1 (enum tr
|| TREE_OVERFLOW (cst0))
return NULL_TREE;
- /* See if we can reduce the mangitude of the constant in
+ /* See if we can reduce the magnitude of the constant in
arg0 by changing the comparison code. */
if (code0 == INTEGER_CST)
{
@@ -7899,7 +7899,7 @@ maybe_canonicalize_comparison (enum tree
return t;
/* Try canonicalization by simplifying arg1 using the swapped
- comparsion. */
+ comparison. */
code = swap_tree_comparison (code);
return maybe_canonicalize_comparison_1 (code, type, arg1, arg0);
}
Index: fwprop.c
===================================================================
--- fwprop.c (revision 119436)
+++ fwprop.c (working copy)
@@ -389,7 +389,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx n
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
- resulting expression (in mode MODE). Return a new expresion if it is
+ resulting expression (in mode MODE). Return a new expression if it is
a constant, otherwise X.
Simplifications where occurrences of NEW collapse to a constant are always
Index: predict.c
===================================================================
--- predict.c (revision 119436)
+++ predict.c (working copy)
@@ -1601,7 +1601,7 @@ estimate_loops_at_level (struct loop *fi
}
}
-/* Propates frequencies through structure of loops. */
+/* Propagates frequencies through structure of loops. */
static void
estimate_loops (void)
Index: tree-data-ref.h
===================================================================
--- tree-data-ref.h (revision 119436)
+++ tree-data-ref.h (working copy)
@@ -119,7 +119,7 @@ struct data_reference
a[j].b[5][j] = 0;
Here the offset expression (j * C_j + C) will not contain variables after
- subsitution of j=3 (3*C_j + C).
+ substitution of j=3 (3*C_j + C).
Misalignment can be calculated only if all the variables can be
substituted with constants, otherwise, we record maximum possible alignment
Index: tree-flow.h
===================================================================
--- tree-flow.h (revision 119436)
+++ tree-flow.h (working copy)
@@ -39,8 +39,8 @@ struct basic_block_def;
typedef struct basic_block_def *basic_block;
#endif
-/* Gimple dataflow datastructure. All publically available fields shall have
- gimple_ accessor defined in tree-flow-inline.h, all publically modifiable
+/* Gimple dataflow datastructure. All publicly available fields shall have
+ gimple_ accessor defined in tree-flow-inline.h, all publicly modifiable
fields should have gimple_set accessor. */
struct gimple_df GTY(()) {
/* Array of all variables referenced in the function. */
Index: tree-ssa-loop-manip.c
===================================================================
--- tree-ssa-loop-manip.c (revision 119436)
+++ tree-ssa-loop-manip.c (working copy)
@@ -627,7 +627,7 @@ can_unroll_loop_p (struct loop *loop, un
|| niter->cmp == ERROR_MARK
/* Scalar evolutions analysis might have copy propagated
the abnormal ssa names into these expressions, hence
- emiting the computations based on them during loop
+ emitting the computations based on them during loop
unrolling might create overlapping life ranges for
them, and failures in out-of-ssa. */
|| contains_abnormal_ssa_name_p (niter->may_be_zero)
Index: tree-ssa-loop-niter.c
===================================================================
--- tree-ssa-loop-niter.c (revision 119436)
+++ tree-ssa-loop-niter.c (working copy)
@@ -1831,7 +1831,7 @@ idx_infer_loop_bounds (tree base, tree *
unsigned char).
To make things simpler, we require both bounds to fit into type, although
- there are cases where this would not be strightly necessary. */
+ there are cases where this would not be strictly necessary. */
if (!int_fits_type_p (high, type)
|| !int_fits_type_p (low, type))
return true;
@@ -2086,7 +2086,7 @@ n_of_executions_at_most (tree stmt,
-- if NITER_BOUND->is_exit is true, then everything before
NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
- times, and everyting after it at most NITER_BOUND->bound times.
+ times, and everything after it at most NITER_BOUND->bound times.
-- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
is executed, then NITER_BOUND->stmt is executed as well in the same
Index: tree-ssa-pre.c
===================================================================
--- tree-ssa-pre.c (revision 119436)
+++ tree-ssa-pre.c (working copy)
@@ -1668,7 +1668,7 @@ compute_antic_aux (basic_block block, bo
(since the maximal set often has 300+ members, even when you
have a small number of blocks).
Basically, we defer the computation of ANTIC for this block
- until we have processed it's successor, which will inveitably
+ until we have processed its successor, which will inevitably
have a *much* smaller set of values to phi translate once
clean has been run on it.
The cost of doing this is that we technically perform more
Index: tree-vect-analyze.c
===================================================================
--- tree-vect-analyze.c (revision 119436)
+++ tree-vect-analyze.c (working copy)
@@ -1428,7 +1428,7 @@ vect_enhance_data_refs_alignment (loop_v
{
/* For interleaved access we peel only if number of iterations in
the prolog loop ({VF - misalignment}), is a multiple of the
- number of the interelaved accesses. */
+ number of the interleaved accesses. */
int elem_size, mis_in_elements;
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
@@ -2228,7 +2228,8 @@ vect_mark_stmts_to_be_vectorized (loop_v
is not used inside the loop), it will be vectorized, and therefore
the corresponding DEF_STMTs need to marked as relevant.
We distinguish between two kinds of relevant stmts - those that are
- used by a reduction conputation, and those that are (also) used by a regular computation. This allows us later on to identify stmts
+ used by a reduction computation, and those that are (also) used by
+ a regular computation. This allows us later on to identify stmts
that are used solely by a reduction, and therefore the order of
the results that they produce does not have to be kept.
*/
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 119436)
+++ tree-vect-transform.c (working copy)
@@ -368,7 +368,7 @@ vect_create_data_ref_ptr (tree stmt,
/* Function bump_vector_ptr
Increment a pointer (to a vector type) by vector-size. Connect the new
- increment stmt to the exising def-use update-chain of the pointer.
+ increment stmt to the existing def-use update-chain of the pointer.
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
@@ -658,7 +658,7 @@ vect_get_vec_def_for_operand (tree op, t
stmts operating on wider types we need to create 'VF/nunits' "copies" of the
vector stmt (each computing a vector of 'nunits' results, and together
computing 'VF' results in each iteration). This function is called when
- vectorizing such a stmt (e.g. vectorizing S2 in the illusration below, in
+ vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
which VF=16 and nuniti=4, so the number of copies required is 4):
scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
@@ -2495,13 +2495,13 @@ vect_strided_store_supported (tree vecty
/* Function vect_permute_store_chain.
- Given a chain of interleaved strores in DR_CHAIN of LENGTH that must be
+ Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
a power of 2, generate interleave_high/low stmts to reorder the data
correctly for the stores. Return the final references for stores in
RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
- The input is 4 vectors each containg 8 elements. We assign a number to each
+ The input is 4 vectors each containing 8 elements. We assign a number to each
element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
@@ -2529,7 +2529,7 @@ vect_strided_store_supported (tree vecty
and of interleave_low: 2 6 3 7
- The permutaion is done in log LENGTH stages. In each stage interleave_high
+ The permutation is done in log LENGTH stages. In each stage interleave_high
and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
@@ -2758,7 +2758,7 @@ vectorizable_store (tree stmt, block_stm
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
(the order of the data-refs in the output of vect_permute_store_chain
corresponds to the order of scalar stmts in the interleaving chain - see
- the documentaion of vect_permute_store_chain()).
+ the documentation of vect_permute_store_chain()).
In case of both multiple types and interleaving, above vector stores and
permutation stmts are created for every copy. The result vector stmts are
@@ -3050,7 +3050,7 @@ vect_strided_load_supported (tree vectyp
correctly. Return the final references for loads in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
- The input is 4 vectors each containg 8 elements. We assign a number to each
+ The input is 4 vectors each containing 8 elements. We assign a number to each
element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
@@ -3078,7 +3078,7 @@ vect_strided_load_supported (tree vectyp
and of extract_odd: 1 3 5 7
- The permutaion is done in log LENGTH stages. In each stage extract_even and
+ The permutation is done in log LENGTH stages. In each stage extract_even and
extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
order. In our example,
@@ -3443,7 +3443,7 @@ vectorizable_load (tree stmt, block_stmt
And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
(the order of the data-refs in the output of vect_permute_load_chain
corresponds to the order of scalar stmts in the interleaving chain - see
- the documentaion of vect_permute_load_chain()).
+ the documentation of vect_permute_load_chain()).
The generation of permutation stmts and recording them in
STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
@@ -4332,7 +4332,7 @@ vect_gen_niters_for_prolog_loop (loop_ve
if (DR_GROUP_FIRST_DR (stmt_info))
{
- /* For interleaved access element size must be multipled by the size of
+ /* For interleaved access element size must be multiplied by the size of
the interleaved group. */
group_size = DR_GROUP_SIZE (vinfo_for_stmt (
DR_GROUP_FIRST_DR (stmt_info)));
Index: tree-vectorizer.c
===================================================================
--- tree-vectorizer.c (revision 119436)
+++ tree-vectorizer.c (working copy)
@@ -1762,7 +1762,7 @@ vect_is_simple_use (tree operand, loop_v
vector form (i.e., when operating on arguments of type VECTYPE).
The two kinds of widening operations we currently support are
- NOP and WIDEN_MULT. This function checks if these oprations
+ NOP and WIDEN_MULT. This function checks if these operations
are supported by the target platform either directly (via vector
tree-codes), or via target builtins.
@@ -1796,9 +1796,9 @@ supportable_widening_operation (enum tre
vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
However, in the special case that the result of the widening operation is
- used in a reduction copmutation only, the order doesn't matter (because
+ used in a reduction computation only, the order doesn't matter (because
when vectorizing a reduction we change the order of the computation).
- Some targets can take advatage of this and generate more efficient code.
+ Some targets can take advantage of this and generate more efficient code.
For example, targets like Altivec, that support widen_mult using a sequence
of {mult_even,mult_odd} generate the following vectors:
vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8]. */
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 119436)
+++ tree-vrp.c (working copy)
@@ -2902,7 +2902,7 @@ register_edge_assert_for (tree name, edg
/* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
statement of NAME we can assert both operands of the TRUTH_AND_EXPR
- have non-zero value. */
+ have nonzero value. */
if (((comp_code == EQ_EXPR && integer_onep (val))
|| (comp_code == NE_EXPR && integer_zerop (val))))
{
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-10-29 10:32 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-10-29 10:32 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-10-29 Kazu Hirata <kazu@codesourcery.com>
* config/darwin.c, config/darwin.opt, config/ia64/itanium1.md,
config/ia64/itanium2.md, real.c, tree-ssa-structalias.c: Fix
comment typos.
Index: config/darwin.c
===================================================================
--- config/darwin.c (revision 118129)
+++ config/darwin.c (working copy)
@@ -1610,7 +1610,7 @@ darwin_set_default_type_attributes (tree
TYPE_ATTRIBUTES (type));
}
-/* True, iff we're generating code for loadable kernel extentions. */
+/* True, iff we're generating code for loadable kernel extensions. */
bool
darwin_kextabi_p (void) {
Index: config/darwin.opt
===================================================================
--- config/darwin.opt (revision 118129)
+++ config/darwin.opt (working copy)
@@ -33,8 +33,8 @@ Set sizeof(bool) to 1
fapple-kext
Target Report Var(flag_apple_kext)
-Generate code for darwin loadable kernel extentions
+Generate code for darwin loadable kernel extensions
mkernel
Target Report Var(flag_mkernel)
-Generate code for the kernel or loadable kernel extentions
+Generate code for the kernel or loadable kernel extensions
Index: config/ia64/itanium1.md
===================================================================
--- config/ia64/itanium1.md (revision 118129)
+++ config/ia64/itanium1.md (working copy)
@@ -131,7 +131,7 @@
This is only worth doing when we are debugging the description
and need to look more accurately at reservations of states.
- o "ndfa" which makes automata with nondetermenistic reservation
+ o "ndfa" which makes automata with nondeterministic reservation
by insns.
o (define_reservation string string) names reservation (the first
Index: config/ia64/itanium2.md
===================================================================
--- config/ia64/itanium2.md (revision 118129)
+++ config/ia64/itanium2.md (working copy)
@@ -129,7 +129,7 @@
This is only worth doing when we are debugging the description
and need to look more accurately at reservations of states.
- o "ndfa" which makes automata with nondetermenistic reservation
+ o "ndfa" which makes automata with nondeterministic reservation
by insns.
o (define_reservation string string) names reservation (the first
Index: real.c
===================================================================
--- real.c (revision 118129)
+++ real.c (working copy)
@@ -4923,7 +4923,7 @@ real_copysign (REAL_VALUE_TYPE *r, const
}
/* Convert from REAL_VALUE_TYPE to MPFR. The caller is responsible
- for initializing and clearing the MPFR parmeter. */
+ for initializing and clearing the MPFR parameter. */
void
mpfr_from_real (mpfr_ptr m, const REAL_VALUE_TYPE *r)
Index: tree-ssa-structalias.c
===================================================================
--- tree-ssa-structalias.c (revision 118129)
+++ tree-ssa-structalias.c (working copy)
@@ -2421,7 +2421,7 @@ get_constraint_for_component_ref (tree t
t = get_ref_base_and_extent (t, &bitpos, &bitsize, &bitmaxsize);
- /* String constants's are readonly, so there is nothing to really do
+ /* String constants are readonly, so there is nothing to really do
here. */
if (TREE_CODE (t) == STRING_CST)
return;
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-06-30 19:34 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-06-30 19:34 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-06-30 Kazu Hirata <kazu@codesourcery.com>
* cfgexpand.c, config/i386/i386.c, genpreds.c, tree-cfg.c: Fix
comment typos.
Index: cfgexpand.c
===================================================================
--- cfgexpand.c (revision 115097)
+++ cfgexpand.c (working copy)
@@ -1626,7 +1626,7 @@ tree_expand_cfg (void)
init_block = construct_init_block ();
/* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
- remainining edges in expand_gimple_basic_block. */
+ remaining edges in expand_gimple_basic_block. */
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
e->flags &= ~EDGE_EXECUTABLE;
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 115097)
+++ config/i386/i386.c (working copy)
@@ -68,7 +68,7 @@ Boston, MA 02110-1301, USA. */
#define COSTS_N_BYTES(N) ((N) * 2)
static const
-struct processor_costs size_cost = { /* costs for tunning for size */
+struct processor_costs size_cost = { /* costs for tuning for size */
COSTS_N_BYTES (2), /* cost of an add instruction */
COSTS_N_BYTES (3), /* cost of a lea instruction */
COSTS_N_BYTES (2), /* variable shift costs */
Index: genpreds.c
===================================================================
--- genpreds.c (revision 115097)
+++ genpreds.c (working copy)
@@ -373,7 +373,7 @@ add_mode_tests (struct pred_data *p)
case AND:
/* The switch code generation in write_predicate_stmts prefers
rtx code tests to be at the top of the expression tree. So
- push this AND down into the second operand of an exisiting
+ push this AND down into the second operand of an existing
AND expression. */
if (generate_switch_p (XEXP (subexp, 0)))
pos = &XEXP (subexp, 1);
@@ -563,7 +563,7 @@ write_match_code_switch (rtx exp)
}
}
-/* Given a predictate expression EXP, write out a sequence of stmts
+/* Given a predicate expression EXP, write out a sequence of stmts
to evaluate it. This is similar to write_predicate_expr but can
generate efficient switch statements. */
Index: tree-cfg.c
===================================================================
--- tree-cfg.c (revision 115097)
+++ tree-cfg.c (working copy)
@@ -3121,7 +3121,7 @@ reinstall_phi_args (edge new_edge, edge
PENDING_STMT (old_edge) = NULL;
}
-/* Returns the basic block after that the new basic block created
+/* Returns the basic block after which the new basic block created
by splitting edge EDGE_IN should be placed. Tries to keep the new block
near its "logical" location. This is of most help to humans looking
at debugging dumps. */
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-05-28 19:10 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-05-28 19:10 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-05-28 Kazu Hirata <kazu@codesourcery.com>
* cfgcleanup.c, cfgexpand.c, cgraphunit.c, config/arm/arm.c,
config/fr30/fr30.md, config/i386/i386-interix.h,
config/i386/i386.c, config/i386/i386.md, config/sh/superh.h,
config/sh/superh64.h, config/v850/v850.c, df-core.c,
df-problems.c, df.h, except.c, final.c, haifa-sched.c,
lambda-code.c, libgcc2.h, omp-low.c, optabs.c, predict.c,
reload.c, tree-flow.h, tree-outof-ssa.c, tree-ssa-dce.c,
tree-ssa-pre.c, tree-vect-transform.c: Fix comment typos.
Follow spelling conventions.
* doc/invoke.texi, doc/rtl.texi, doc/tm.texi: Fix typos.
Follow spelling conventions.
Index: cfgcleanup.c
===================================================================
--- cfgcleanup.c (revision 114165)
+++ cfgcleanup.c (working copy)
@@ -1733,7 +1733,7 @@ try_crossjump_to_edge (int mode, edge e1
redirect_to->count += src1->count;
redirect_to->frequency += src1->frequency;
- /* We may have some registers visible trought the block. */
+ /* We may have some registers visible through the block. */
redirect_to->flags |= BB_DIRTY;
/* Recompute the frequencies and counts of outgoing edges. */
Index: cfgexpand.c
===================================================================
--- cfgexpand.c (revision 114165)
+++ cfgexpand.c (working copy)
@@ -764,7 +764,7 @@ expand_used_vars_for_block (tree block,
expand_used_vars_for_block (t, false);
/* Since we do not track exact variable lifetimes (which is not even
- possible for varibles whose address escapes), we mirror the block
+ possible for variables whose address escapes), we mirror the block
tree in the interference graph. Here we cause all variables at this
level, and all sublevels, to conflict. Do make certain that a
variable conflicts with itself. */
Index: cgraphunit.c
===================================================================
--- cgraphunit.c (revision 114165)
+++ cgraphunit.c (working copy)
@@ -222,7 +222,7 @@ decide_is_function_needed (struct cgraph
PR24561), but don't do so for always_inline functions, functions
declared inline and nested functions. These were optimized out
in the original implementation and it is unclear whether we want
- to change the behaviour here. */
+ to change the behavior here. */
if (((TREE_PUBLIC (decl)
|| (!optimize && !node->local.disregard_inline_limits
&& !DECL_DECLARED_INLINE_P (decl)
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 114165)
+++ config/arm/arm.c (working copy)
@@ -2802,7 +2802,7 @@ arm_pass_by_reference (CUMULATIVE_ARGS *
/* Encode the current state of the #pragma [no_]long_calls. */
typedef enum
{
- OFF, /* No #pramgma [no_]long_calls is in effect. */
+ OFF, /* No #pragma [no_]long_calls is in effect. */
LONG, /* #pragma long_calls is in effect. */
SHORT /* #pragma no_long_calls is in effect. */
} arm_pragma_enum;
Index: config/fr30/fr30.md
===================================================================
--- config/fr30/fr30.md (revision 114165)
+++ config/fr30/fr30.md (working copy)
@@ -35,7 +35,7 @@ (define_attr "size" "small,big"
;; Define an attribute to be used by the delay slot code.
-;; An instruction by default is considered to be 'delyabable'
+;; An instruction by default is considered to be 'delayable'
;; that is, it can be placed into a delay slot, but it is not
;; itself a delayed branch type instruction. An instruction
;; whose type is 'delayed' is one which has a delay slot, and
Index: config/i386/i386-interix.h
===================================================================
--- config/i386/i386-interix.h (revision 114165)
+++ config/i386/i386-interix.h (working copy)
@@ -84,7 +84,7 @@ Boston, MA 02110-1301, USA. */
#undef CPP_SPEC
/* Write out the correct language type definition for the header files.
Unless we have assembler language, write out the symbols for C.
- mieee is an Alpha specific variant. Cross polination a bad idea.
+ mieee is an Alpha specific variant. Cross pollination is a bad idea.
*/
#define CPP_SPEC "-remap %{posix:-D_POSIX_SOURCE} \
-isystem %$INTERIX_ROOT/usr/include"
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 114165)
+++ config/i386/i386.c (working copy)
@@ -604,7 +604,7 @@ struct processor_costs generic64_cost =
COSTS_N_INSNS (1), /* cost of an add instruction */
/* On all chips taken into consideration lea is 2 cycles and more. With
this cost however our current implementation of synth_mult results in
- use of unnecesary temporary registers causing regression on several
+ use of unnecessary temporary registers causing regression on several
SPECfp benchmarks. */
COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
COSTS_N_INSNS (1), /* variable shift costs */
@@ -10513,7 +10513,7 @@ ix86_expand_carry_flag_compare (enum rtx
enum machine_mode mode =
GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
- /* Do not handle DImode compares that go trought special path. Also we can't
+ /* Do not handle DImode compares that go through special path. Also we can't
deal with FP compares yet. This is possible to add. */
if (mode == (TARGET_64BIT ? TImode : DImode))
return false;
Index: config/i386/i386.md
===================================================================
--- config/i386/i386.md (revision 114165)
+++ config/i386/i386.md (working copy)
@@ -13877,7 +13877,7 @@ (define_insn "align"
#else
/* It is tempting to use ASM_OUTPUT_ALIGN here, but we don't want to do that.
The align insn is used to avoid 3 jump instructions in the row to improve
- branch prediction and the benefits hardly outweight the cost of extra 8
+ branch prediction and the benefits hardly outweigh the cost of extra 8
nops on the average inserted by full alignment pseudo operation. */
#endif
return "";
Index: config/sh/superh.h
===================================================================
--- config/sh/superh.h (revision 114165)
+++ config/sh/superh.h (working copy)
@@ -28,7 +28,7 @@ Boston, MA 02110-1301, USA. */
defaults and provide options --defsym _start and --defsym _stack
which are required by the SuperH configuration of GNU ld.
- This file is intended to overide sh.h */
+ This file is intended to override sh.h. */
#ifndef _SUPERH_H
Index: config/sh/superh64.h
===================================================================
--- config/sh/superh64.h (revision 114165)
+++ config/sh/superh64.h (working copy)
@@ -25,7 +25,7 @@ Boston, MA 02110-1301, USA. */
/* This header file is used when the vendor name is set to 'superh'.
It configures the compiler for SH5 only and switches the default
endianess to little.
- This file is intended to overide sh.h, superh.h and sh64.h (which
+ This file is intended to override sh.h, superh.h and sh64.h (which
should have been included in that order) */
Index: config/v850/v850.c
===================================================================
--- config/v850/v850.c (revision 114165)
+++ config/v850/v850.c (working copy)
@@ -1070,7 +1070,7 @@ ep_memory_operand (rtx op, enum machine_
int mask;
/* If we are not using the EP register on a per-function basis
- then do not allow this optimisation at all. This is to
+ then do not allow this optimization at all. This is to
prevent the use of the SLD/SST instructions which cannot be
guaranteed to work properly due to a hardware bug. */
if (!TARGET_EP)
Index: df-core.c
===================================================================
--- df-core.c (revision 114165)
+++ df-core.c (working copy)
@@ -164,7 +164,7 @@ incremental algorithms.
As for the bit vector problems, there is no interface to give a set of
blocks over with to resolve the iteration. In general, restarting a
dataflow iteration is difficult and expensive. Again, the best way to
-keep the dataflow infomation up to data (if this is really what is
+keep the dataflow information up to date (if this is really what is
needed) it to formulate a problem specific solution.
There are fine grained calls for creating and deleting references from
Index: df-problems.c
===================================================================
--- df-problems.c (revision 114165)
+++ df-problems.c (working copy)
@@ -315,7 +315,7 @@ df_unset_seen (void)
sparse_invalidated_by call both play this game. */
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_ru_problem_data
{
@@ -851,7 +851,7 @@ df_ru_add_problem (struct df *df, int fl
here for the defs. */
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_rd_problem_data
{
/* If the number of defs for regnum N is less than
@@ -2147,7 +2147,7 @@ df_ur_add_problem (struct df *df, int fl
----------------------------------------------------------------------------*/
/* Private data used to compute the solution for this problem. These
- data structures are not accessable outside of this module. */
+ data structures are not accessible outside of this module. */
struct df_urec_problem_data
{
bool earlyclobbers_found; /* True if any instruction contains an
@@ -3797,7 +3797,7 @@ static struct df_problem problem_RI =
df_ri_dump, /* Debugging. */
/* Technically this is only dependent on the live registers problem
- but it will produce infomation if built one of uninitialized
+ but it will produce information if built one of uninitialized
register problems (UR, UREC) is also run. */
df_lr_add_problem, /* Dependent problem. */
0 /* Changeable flags. */
Index: df.h
===================================================================
--- df.h (revision 114165)
+++ df.h (working copy)
@@ -214,7 +214,7 @@ struct dataflow
/* The pool to allocate the block_info from. */
alloc_pool block_pool;
- /* Problem specific control infomation. */
+ /* Problem specific control information. */
/* Scanning flags. */
#define DF_HARD_REGS 1 /* Mark hard registers. */
@@ -502,7 +502,7 @@ struct df_ru_bb_info
sparse_kill, each register gets a slot and a 1 in this bitvector
means that all of the uses of that register are killed. This is
a very useful efficiency hack in that it keeps from having push
- around big groups of 1s. This is implemened by the
+ around big groups of 1s. This is implemented by the
bitmap_clear_range call. */
bitmap kill;
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 114165)
+++ doc/invoke.texi (working copy)
@@ -1611,7 +1611,7 @@ when used within the DSO@. Enabling thi
on load and link times of a DSO as it massively reduces the size of the
dynamic export table when the library makes heavy use of templates.
-The behaviour of this switch is not quite the same as marking the
+The behavior of this switch is not quite the same as marking the
methods as hidden directly. Normally if there is a class with default
visibility which has a hidden method, the effect of this is that the
method must be defined in only one shared object. This switch does
@@ -3271,7 +3271,7 @@ in some fonts or display methodologies,
been applied. For instance @code{\u207F}, ``SUPERSCRIPT LATIN SMALL
LETTER N'', will display just like a regular @code{n} which has been
placed in a superscript. ISO 10646 defines the @dfn{NFKC}
-normalisation scheme to convert all these into a standard form as
+normalization scheme to convert all these into a standard form as
well, and GCC will warn if your code is not in NFKC if you use
@option{-Wnormalized=nfkc}. This warning is comparable to warning
about every identifier that contains the letter O because it might be
Index: doc/rtl.texi
===================================================================
--- doc/rtl.texi (revision 114165)
+++ doc/rtl.texi (working copy)
@@ -1896,7 +1896,7 @@ still known.
@itemx (ss_neg:@var{m} @var{x})
These two expressions represent the negation (subtraction from zero) of
the value represented by @var{x}, carried out in mode @var{m}. They
-differ in the behaviour on overflow of integer modes. In the case of
+differ in the behavior on overflow of integer modes. In the case of
@code{neg}, the negation of the operand may be a number not representable
in mode @var{m}, in which case it is truncated to @var{m}. @code{ss_neg}
ensures that an out-of-bounds result saturates to the maximum or minimum
@@ -2016,7 +2016,7 @@ fixed-point mode.
@itemx (ss_ashift:@var{m} @var{x} @var{c})
These two expressions represent the result of arithmetically shifting @var{x}
left by @var{c} places. They differ in their behavior on overflow of integer
-modes. An @code{ashift} operation is a plain shift with no special behaviour
+modes. An @code{ashift} operation is a plain shift with no special behavior
in case of a change in the sign bit; @code{ss_ashift} saturates to the minimum
or maximum representable value if any of the bits shifted out differs from the
final sign bit.
Index: doc/tm.texi
===================================================================
--- doc/tm.texi (revision 114165)
+++ doc/tm.texi (working copy)
@@ -3090,7 +3090,7 @@ DWARF 2.
@defmac FRAME_POINTER_CFA_OFFSET (@var{fundecl})
If defined, a C expression whose value is an integer giving the offset
in bytes from the frame pointer to the canonical frame address (cfa).
-The final value should conincide with that calculated by
+The final value should coincide with that calculated by
@code{INCOMING_FRAME_SP_OFFSET}.
Normally the CFA is calculated as an offset from the argument pointer,
Index: except.c
===================================================================
--- except.c (revision 114165)
+++ except.c (working copy)
@@ -3572,7 +3572,7 @@ switch_to_exception_section (void)
/* Output a reference from an exception table to the type_info object TYPE.
- TT_FORMAT and TT_FORMAT_SIZE descibe the DWARF encoding method used for
+ TT_FORMAT and TT_FORMAT_SIZE describe the DWARF encoding method used for
the value. */
static void
Index: final.c
===================================================================
--- final.c (revision 114165)
+++ final.c (working copy)
@@ -380,7 +380,7 @@ init_insn_lengths (void)
}
/* Obtain the current length of an insn. If branch shortening has been done,
- get its actual length. Otherwise, use FALLBACK_FN to calcualte the
+ get its actual length. Otherwise, use FALLBACK_FN to calculate the
length. */
static inline int
get_attr_length_1 (rtx insn ATTRIBUTE_UNUSED,
Index: haifa-sched.c
===================================================================
--- haifa-sched.c (revision 114165)
+++ haifa-sched.c (working copy)
@@ -2765,14 +2765,14 @@ sched_init (void)
spec_info->weakness_cutoff =
(PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
#ifdef ENABLE_CHECKING
check_sched_flags ();
#endif
}
else
- /* So we won't read anything accidently. */
+ /* So we won't read anything accidentally. */
spec_info = 0;
/* Initialize issue_rate. */
Index: lambda-code.c
===================================================================
--- lambda-code.c (revision 114165)
+++ lambda-code.c (working copy)
@@ -623,7 +623,7 @@ compute_nest_using_fourier_motzkin (int
4. Multiply the composed transformation matrix times the matrix form of the
loop.
5. Transform the newly created matrix (from step 4) back into a loop nest
- using fourier motzkin elimination to figure out the bounds. */
+ using Fourier-Motzkin elimination to figure out the bounds. */
static lambda_loopnest
lambda_compute_auxillary_space (lambda_loopnest nest,
@@ -742,7 +742,7 @@ lambda_compute_auxillary_space (lambda_l
lambda_matrix_add_mc (B, 1, B1, -1, B1, size, invariants);
/* Now compute the auxiliary space bounds by first inverting U, multiplying
- it by A1, then performing fourier motzkin. */
+ it by A1, then performing Fourier-Motzkin. */
invertedtrans = lambda_matrix_new (depth, depth);
Index: libgcc2.h
===================================================================
--- libgcc2.h (revision 114165)
+++ libgcc2.h (working copy)
@@ -429,7 +429,7 @@ extern const UQItype __popcount_tab[256]
/* Defined for L_clz. Exported here because some targets may want to use
it for their own versions of the __clz builtins. It contains the bit
position of the first set bit for the numbers 0 - 255. This avoids the
- need for a seperate table for the __ctz builtins. */
+ need for a separate table for the __ctz builtins. */
extern const UQItype __clz_tab[256];
#include "longlong.h"
Index: omp-low.c
===================================================================
--- omp-low.c (revision 114165)
+++ omp-low.c (working copy)
@@ -478,7 +478,7 @@ use_pointer_for_field (tree decl, bool s
if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
return true;
- /* We can only use copy-in/copy-out semantics for shared varibles
+ /* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_p)
{
Index: optabs.c
===================================================================
--- optabs.c (revision 114165)
+++ optabs.c (working copy)
@@ -4741,7 +4741,7 @@ expand_fix (rtx to, rtx from, int unsign
This is not needed. Consider, for instance conversion from SFmode
into DImode.
- The hot path trought the code is dealing with inputs smaller than 2^63
+ The hot path through the code is dealing with inputs smaller than 2^63
and doing just the conversion, so there is no bits to lose.
In the other path we know the value is positive in the range 2^63..2^64-1
Index: predict.c
===================================================================
--- predict.c (revision 114165)
+++ predict.c (working copy)
@@ -1258,7 +1258,7 @@ tree_estimate_probability (void)
{
/* Predict early returns to be probable, as we've already taken
care for error returns and other cases are often used for
- fast paths trought function. */
+ fast paths through function. */
if (e->dest == EXIT_BLOCK_PTR
&& TREE_CODE (last_stmt (bb)) == RETURN_EXPR
&& !single_pred_p (bb))
Index: reload.c
===================================================================
--- reload.c (revision 114165)
+++ reload.c (working copy)
@@ -5375,7 +5375,7 @@ find_reloads_address_1 (enum machine_mod
GET_MODE (orig_op1))));
}
/* Plus in the index register may be created only as a result of
- register remateralization for expression like &localvar*4. Reload it.
+ register rematerialization for expression like &localvar*4. Reload it.
It may be possible to combine the displacement on the outer level,
but it is probably not worthwhile to do so. */
if (context == 1)
Index: tree-flow.h
===================================================================
--- tree-flow.h (revision 114165)
+++ tree-flow.h (working copy)
@@ -263,7 +263,7 @@ typedef struct immediate_use_iterator_d
/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to
- get access to each occurence of ssavar on the stmt returned by
+ get access to each occurrence of ssavar on the stmt returned by
that iterator.. for instance:
FOR_EACH_IMM_USE_STMT (stmt, iter, var)
Index: tree-outof-ssa.c
===================================================================
--- tree-outof-ssa.c (revision 114165)
+++ tree-outof-ssa.c (working copy)
@@ -2214,7 +2214,7 @@ analyze_edges_for_bb (basic_block bb)
leader_match = leader;
/* The tree_* cfg manipulation routines use the PENDING_EDGE field
- for various PHI manipulations, so it gets cleared whhen calls are
+ for various PHI manipulations, so it gets cleared when calls are
made to make_forwarder_block(). So make sure the edge is clear,
and use the saved stmt list. */
PENDING_STMT (leader) = NULL;
Index: tree-ssa-dce.c
===================================================================
--- tree-ssa-dce.c (revision 114165)
+++ tree-ssa-dce.c (working copy)
@@ -720,7 +720,7 @@ remove_dead_stmt (block_stmt_iterator *i
nothing to the program, then we not only remove it, but we also change
the flow graph so that the current block will simply fall-thru to its
immediate post-dominator. The blocks we are circumventing will be
- removed by cleaup_tree_cfg if this change in the flow graph makes them
+ removed by cleanup_tree_cfg if this change in the flow graph makes them
unreachable. */
if (is_ctrl_stmt (t))
{
Index: tree-ssa-pre.c
===================================================================
--- tree-ssa-pre.c (revision 114165)
+++ tree-ssa-pre.c (working copy)
@@ -264,7 +264,7 @@ typedef struct bb_value_sets
bitmap rvuse_gen;
bitmap rvuse_kill;
- /* For actually occuring loads, as long as they occur before all the
+ /* For actually occurring loads, as long as they occur before all the
other stores in the block, we know they are antic at the top of
the block, regardless of RVUSE_KILL. */
value_set_t antic_safe_loads;
Index: tree-vect-transform.c
===================================================================
--- tree-vect-transform.c (revision 114165)
+++ tree-vect-transform.c (working copy)
@@ -3036,7 +3036,7 @@ vect_transform_loop (loop_vec_info loop_
bsi_insert_before (&cond_exp_bsi, cond_expr_stmt_list, BSI_SAME_STMT);
}
- /* CHECKME: we wouldn't need this if we calles update_ssa once
+ /* CHECKME: we wouldn't need this if we called update_ssa once
for all loops. */
bitmap_zero (vect_vnames_to_rename);
^ permalink raw reply [flat|nested] 38+ messages in thread
* Re: [patch] gcc/*: Fix comment typos.
2006-04-22 16:20 Kazu Hirata
@ 2006-04-22 16:32 ` Joseph S. Myers
0 siblings, 0 replies; 38+ messages in thread
From: Joseph S. Myers @ 2006-04-22 16:32 UTC (permalink / raw)
To: Kazu Hirata; +Cc: gcc-patches
On Sat, 22 Apr 2006, Kazu Hirata wrote:
> Index: config/soft-fp/op-common.h
> ===================================================================
> --- config/soft-fp/op-common.h (revision 113174)
> +++ config/soft-fp/op-common.h (working copy)
> @@ -35,7 +35,7 @@
> _FP_FRAC_DECL_##wc(X)
>
> /*
> - * Finish truely unpacking a native fp value by classifying the kind
> + * Finish truly unpacking a native fp value by classifying the kind
> * of fp value and normalizing both the exponent and the fraction.
> */
Patches to soft-fp must go in glibc CVS before they go in GCC (then, the
whole new patched file should be imported into GCC, rather than importing
changes piecemeal). I don't see this change in glibc CVS.
--
Joseph S. Myers http://www.srcf.ucam.org/~jsm28/gcc/
jsm@polyomino.org.uk (personal mail)
joseph@codesourcery.com (CodeSourcery mail)
jsm28@gcc.gnu.org (Bugzilla assignments and CCs)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-04-22 16:20 Kazu Hirata
2006-04-22 16:32 ` Joseph S. Myers
0 siblings, 1 reply; 38+ messages in thread
From: Kazu Hirata @ 2006-04-22 16:20 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-04-22 Kazu Hirata <kazu@codesourcery.com>
* config/soft-fp/op-common.h, double-int.h, tree-ssa-dom.c:
Fix comment typos.
* doc/tm.texi: Fix a typo.
Index: config/soft-fp/op-common.h
===================================================================
--- config/soft-fp/op-common.h (revision 113174)
+++ config/soft-fp/op-common.h (working copy)
@@ -35,7 +35,7 @@
_FP_FRAC_DECL_##wc(X)
/*
- * Finish truely unpacking a native fp value by classifying the kind
+ * Finish truly unpacking a native fp value by classifying the kind
* of fp value and normalizing both the exponent and the fraction.
*/
Index: doc/tm.texi
===================================================================
--- doc/tm.texi (revision 113174)
+++ doc/tm.texi (working copy)
@@ -9144,7 +9144,7 @@ such cases may improve things.
@end defmac
@deftypefn {Target Hook} int TARGET_MODE_REP_EXTENDED (enum machine_mode @var{mode}, enum machine_mode @var{rep_mode})
-The representation of an intergral mode can be such that the values
+The representation of an integral mode can be such that the values
are always extended to a wider integral mode. Return
@code{SIGN_EXTEND} if values of @var{mode} are represented in
sign-extended form to @var{rep_mode}. Return @code{UNKNOWN}
Index: double-int.h
===================================================================
--- double-int.h (revision 113174)
+++ double-int.h (working copy)
@@ -44,7 +44,7 @@ Software Foundation, 51 Franklin Street,
??? The components of double_int differ in signedness mostly for
historical reasons (they replace an older structure used to represent
- numbers with precision wigher than HOST_WIDE_INT). It might be less
+ numbers with precision higher than HOST_WIDE_INT). It might be less
confusing to have them both signed or both unsigned. */
typedef struct
Index: tree-ssa-dom.c
===================================================================
--- tree-ssa-dom.c (revision 113174)
+++ tree-ssa-dom.c (working copy)
@@ -2439,7 +2439,7 @@ eliminate_degenerate_phis (void)
A set bit indicates that the statement or PHI node which
defines the SSA_NAME should be (re)examined to determine if
- it has become a degenerate PHI or trival const/copy propagation
+ it has become a degenerate PHI or trivial const/copy propagation
opportunity.
Experiments have show we generally get better compilation
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-04-15 21:45 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-04-15 21:45 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-04-15 Kazu Hirata <kazu@codesourcery.com>
* cfgcleanup.c, config/dfp-bit.c, expr.c, fold-const.c,
jump.c, mips-tfile.c, omp-low.c, sched-int.h,
tree-ssa-loop-prefetch.c, tree-vrp.c: Fix comment typos.
Index: cfgcleanup.c
===================================================================
--- cfgcleanup.c (revision 112970)
+++ cfgcleanup.c (working copy)
@@ -1667,7 +1667,7 @@ try_crossjump_to_edge (int mode, edge e1
&& (newpos1 != BB_HEAD (src1)))
return false;
- /* Avoid deleting preserve label when redirecting ABNORMAL edeges. */
+ /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
if (block_has_preserve_label (e1->dest)
&& (e1->flags & EDGE_ABNORMAL))
return false;
Index: config/dfp-bit.c
===================================================================
--- config/dfp-bit.c (revision 112970)
+++ config/dfp-bit.c (working copy)
@@ -411,7 +411,7 @@ DFP_TO_INT (DFP_C_TYPE x)
TO_INTERNAL (&s, &n1);
/* Rescale if the exponent is less than zero. */
decNumberToIntegralValue (&n2, &n1, &context);
- /* Get a value to use for the quanitize call. */
+ /* Get a value to use for the quantize call. */
decNumberFromString (&qval, (char *) "1.0", &context);
/* Force the exponent to zero. */
decNumberQuantize (&n1, &n2, &qval, &context);
Index: expr.c
===================================================================
--- expr.c (revision 112970)
+++ expr.c (working copy)
@@ -7717,7 +7717,7 @@ expand_expr_real_1 (tree exp, rtx target
else if (!MEM_P (op0))
{
/* If the operand is not a MEM, force it into memory. Since we
- are going to be be changing the mode of the MEM, don't call
+ are going to be changing the mode of the MEM, don't call
force_const_mem for constants because we don't allow pool
constants to change mode. */
tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0));
Index: fold-const.c
===================================================================
--- fold-const.c (revision 112970)
+++ fold-const.c (working copy)
@@ -7097,7 +7097,7 @@ native_interpret_vector (tree type, unsi
}
-/* Subroutine of fold_view_convert_expr. Interpet the contents of
+/* Subroutine of fold_view_convert_expr. Interpret the contents of
the buffer PTR of length LEN as a constant of type TYPE. For
INTEGRAL_TYPE_P we return an INTEGER_CST, for SCALAR_FLOAT_TYPE_P
we return a REAL_CST, etc... If the buffer cannot be interpreted,
Index: jump.c
===================================================================
--- jump.c (revision 112970)
+++ jump.c (working copy)
@@ -21,7 +21,7 @@ Software Foundation, 51 Franklin Street,
02110-1301, USA. */
/* This is the pathetic reminder of old fame of the jump-optimization pass
- of the compiler. Now it contains basically set of utility function to
+ of the compiler. Now it contains basically a set of utility functions to
operate with jumps.
Each CODE_LABEL has a count of the times it is used
Index: mips-tfile.c
===================================================================
--- mips-tfile.c (revision 112970)
+++ mips-tfile.c (working copy)
@@ -4363,7 +4363,7 @@ copy_object (void)
/* Read in each of the sections if they exist in the object file.
- We read things in in the order the mips assembler creates the
+ We read things in the order the mips assembler creates the
sections, so in theory no extra seeks are done.
For simplicity sake, round each read up to a page boundary,
Index: omp-low.c
===================================================================
--- omp-low.c (revision 112970)
+++ omp-low.c (working copy)
@@ -2233,10 +2233,10 @@ remove_exit_barrier (struct omp_region *
exit_bb = region->exit;
/* The last insn in the block will be the parallel's OMP_RETURN. The
- workshare's OMP_RETURN will be in a preceeding block. The kinds of
+ workshare's OMP_RETURN will be in a preceding block. The kinds of
statements that can appear in between are extremely limited -- no
memory operations at all. Here, we allow nothing at all, so the
- only thing we allow to preceed this OMP_RETURN is a label. */
+ only thing we allow to precede this OMP_RETURN is a label. */
si = bsi_last (exit_bb);
gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
bsi_prev (&si);
Index: sched-int.h
===================================================================
--- sched-int.h (revision 112970)
+++ sched-int.h (working copy)
@@ -437,7 +437,7 @@ enum SPEC_TYPES_OFFSETS {
/* This dependence is to the instruction in the recovery block, that was
formed to recover after control-speculation failure.
- Thus, this dependence can be be overcome with generating of the copy of
+ Thus, this dependence can be overcome with generating of the copy of
this instruction in the recovery block. */
#define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)
Index: tree-ssa-loop-prefetch.c
===================================================================
--- tree-ssa-loop-prefetch.c (revision 112970)
+++ tree-ssa-loop-prefetch.c (working copy)
@@ -204,7 +204,7 @@ struct mem_ref
struct mem_ref *next; /* The next reference in the group. */
};
-/* Dumps information obout reference REF to FILE. */
+/* Dumps information about reference REF to FILE. */
static void
dump_mem_ref (FILE *file, struct mem_ref *ref)
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 112970)
+++ tree-vrp.c (working copy)
@@ -1703,7 +1703,7 @@ extract_range_from_unary_expr (value_ran
new_max = fold_convert (outer_type, orig_max);
/* Verify the new min/max values are gimple values and
- that they compare equal to the orignal input's
+ that they compare equal to the original input's
min/max values. */
if (is_gimple_val (new_min)
&& is_gimple_val (new_max)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2006-04-08 17:01 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2006-04-08 17:01 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2006-04-08 Kazu Hirata <kazu@codesourcery.com>
* builtins.c, config/arm/arm.c, config/i386/cygwin.h,
config/i386/i386.c, config/ia64/ia64.c, config/s390/fixdfdi.h,
config/sh/sh.c, config/sh/sh.h, df-scan.c, except.c,
haifa-sched.c, optabs.c, rtl.h, sched-deps.c, sched-int.h,
sched-rgn.c, tree-inline.h, tree-ssa-dom.c,
tree-ssa-loop-prefetch.c, tree-ssa-operands.c,
tree-vect-patterns.c, tree-vrp.c: Fix comment typos. Follow
spelling conventions.
* config/ia64/ia64.opt, doc/contrib.texi, doc/invoke.texi,
doc/passes.texi, doc/tm.texi, doc/tree-ssa.texi: Fix comment
typos. Follow spelling conventions.
Index: builtins.c
===================================================================
--- builtins.c (revision 112778)
+++ builtins.c (working copy)
@@ -278,7 +278,7 @@ get_pointer_alignment (tree exp, unsigne
inner = max_align;
while (handled_component_p (exp))
{
- /* Fields in a structure can be packed, honour DECL_ALIGN
+ /* Fields in a structure can be packed, honor DECL_ALIGN
of the FIELD_DECL. For all other references the conservative
alignment is the element type alignment. */
if (TREE_CODE (exp) == COMPONENT_REF)
Index: config/arm/arm.c
===================================================================
--- config/arm/arm.c (revision 112778)
+++ config/arm/arm.c (working copy)
@@ -8003,7 +8003,7 @@ push_minipool_fix (rtx insn, HOST_WIDE_I
/* If an entry requires 8-byte alignment then assume all constant pools
require 4 bytes of padding. Trying to do this later on a per-pool
- basis is awkward becuse existing pool entries have to be modified. */
+ basis is awkward because existing pool entries have to be modified. */
if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
minipool_pad = 4;
Index: config/i386/cygwin.h
===================================================================
--- config/i386/cygwin.h (revision 112778)
+++ config/i386/cygwin.h (working copy)
@@ -235,6 +235,6 @@ while (0)
/* Every program on cygwin links against cygwin1.dll which contains
the pthread routines. There is no need to explicitly link them
- and the -pthread flag is not recognised. */
+ and the -pthread flag is not recognized. */
#undef GOMP_SELF_SPECS
#define GOMP_SELF_SPECS ""
Index: config/i386/i386.c
===================================================================
--- config/i386/i386.c (revision 112778)
+++ config/i386/i386.c (working copy)
@@ -16372,7 +16372,7 @@ ix86_preferred_reload_class (rtx x, enum
if (x == CONST0_RTX (mode))
return class;
- /* Force constants into memory if we are loading a (non-zero) constant into
+ /* Force constants into memory if we are loading a (nonzero) constant into
an MMX or SSE register. This is because there are no MMX/SSE instructions
to load from a constant. */
if (CONSTANT_P (x)
Index: config/ia64/ia64.c
===================================================================
--- config/ia64/ia64.c (revision 112778)
+++ config/ia64/ia64.c (working copy)
@@ -8306,7 +8306,7 @@ ia64_ld_address_bypass_p (rtx producer,
if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
mem = XVECEXP (mem, 0, 0);
else if (GET_CODE (mem) == IF_THEN_ELSE)
- /* ??? Is this bypass neccessary for ld.c? */
+ /* ??? Is this bypass necessary for ld.c? */
{
gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
mem = XEXP (mem, 1);
Index: config/ia64/ia64.opt
===================================================================
--- config/ia64/ia64.opt (revision 112778)
+++ config/ia64/ia64.opt (working copy)
@@ -134,11 +134,11 @@ Print information about speculative moti
msched-prefer-non-data-spec-insns
Common Report Var(mflag_sched_prefer_non_data_spec_insns) Init(0)
-If set, data speculative instructions will be choosen for schedule only if there are no other choices at the moment
+If set, data speculative instructions will be chosen for schedule only if there are no other choices at the moment
msched-prefer-non-control-spec-insns
Common Report Var(mflag_sched_prefer_non_control_spec_insns) Init(0)
-If set, control speculative instructions will be choosen for schedule only if there are no other choices at the moment
+If set, control speculative instructions will be chosen for schedule only if there are no other choices at the moment
msched-count-spec-in-critical-path
Common Report Var(mflag_sched_count_spec_in_critical_path) Init(0)
Index: config/s390/fixdfdi.h
===================================================================
--- config/s390/fixdfdi.h (revision 112778)
+++ config/s390/fixdfdi.h (working copy)
@@ -74,7 +74,7 @@ __fixunstfdi (long double a1)
if (exp <= -PRECISION)
return 0;
- /* NaN: All exponent bits set and a non-zero fraction. */
+ /* NaN: All exponent bits set and a nonzero fraction. */
if ((EXPD(dl1) == 0x7fff) && !FRACD_ZERO_P (dl1))
return 0x0ULL;
@@ -146,7 +146,7 @@ __fixtfdi (long double a1)
if (exp <= -PRECISION)
return 0;
- /* NaN: All exponent bits set and a non-zero fraction. */
+ /* NaN: All exponent bits set and a nonzero fraction. */
if ((EXPD(dl1) == 0x7fff) && !FRACD_ZERO_P (dl1))
return 0x8000000000000000ULL;
Index: config/sh/sh.c
===================================================================
--- config/sh/sh.c (revision 112778)
+++ config/sh/sh.c (working copy)
@@ -6818,7 +6818,7 @@ sh_va_start (tree valist, rtx nextarg)
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
-/* TYPE is a RECORD_TYPE. If there is only a single non-zero-sized
+/* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
member, return it. */
static tree
find_sole_member (tree type)
Index: config/sh/sh.h
===================================================================
--- config/sh/sh.h (revision 112778)
+++ config/sh/sh.h (working copy)
@@ -507,8 +507,8 @@ enum sh_divide_strategy_e {
SH_DIV_INV_CALL2,
SH_DIV_INV_FP,
/* SH1 .. SH4 strategies. Because of the small number of registers
- available, the compiler uses knowledge of the actual et of registers
- being clobbed by the different functions called. */
+ available, the compiler uses knowledge of the actual set of registers
+ being clobbered by the different functions called. */
SH_DIV_CALL_DIV1, /* No FPU, medium size, highest latency. */
SH_DIV_CALL_FP, /* FPU needed, small size, high latency. */
SH_DIV_CALL_TABLE, /* No FPU, large size, medium latency. */
Index: df-scan.c
===================================================================
--- df-scan.c (revision 112778)
+++ df-scan.c (working copy)
@@ -1603,7 +1603,7 @@ df_bb_refs_record (struct dataflow *dflo
DF_REF_ARTIFICIAL | DF_REF_AT_TOP);
#endif
- /* The following code (down thru the arg_pointer seting APPEARS
+ /* The following code (down thru the arg_pointer setting APPEARS
to be necessary because there is nothing that actually
describes what the exception handling code may actually need
to keep alive. */
Index: doc/contrib.texi
===================================================================
--- doc/contrib.texi (revision 112778)
+++ doc/contrib.texi (working copy)
@@ -1300,7 +1300,7 @@ Ingo Proetel for @code{Image}, @code{Log
updates.
@item
-Olga Rodimina for @code{MenuSelectionManager} implemenation.
+Olga Rodimina for @code{MenuSelectionManager} implementation.
@item
Jan Roehrich for @code{BasicTreeUI} and @code{JTree} fixes.
@@ -1324,7 +1324,7 @@ Christian Thalinger for 64-bit cleanups,
interface fixes and @code{CACAO} integration, @code{fdlibm} updates.
@item
-Gael Thomas for @code{VMClassLoader} boot packages support sugestions.
+Gael Thomas for @code{VMClassLoader} boot packages support suggestions.
@item
Andreas Tobler for Darwin and Solaris testing and fixing, @code{Qt4}
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 112778)
+++ doc/invoke.texi (working copy)
@@ -9170,7 +9170,7 @@ Intel PentiumMMX CPU based on Pentium co
Intel PentiumPro CPU@.
@item i686
Same as @code{generic}, but when used as @code{march} option, PentiumPro
-instruction set will be used, so the code will run on all i686 familly chips.
+instruction set will be used, so the code will run on all i686 family chips.
@item pentium2
Intel Pentium2 CPU based on PentiumPro core with MMX instruction set support.
@item pentium3, pentium3m
@@ -9844,7 +9844,7 @@ The default is 'disable'.
@itemx -msched-prefer-non-data-spec-insns
@opindex -mno-sched-prefer-non-data-spec-insns
@opindex -msched-prefer-non-data-spec-insns
-If enabled, data speculative instructions will be choosen for schedule
+If enabled, data speculative instructions will be chosen for schedule
only if there are no other choices at the moment. This will make
the use of the data speculation much more conservative.
The default is 'disable'.
@@ -9853,7 +9853,7 @@ The default is 'disable'.
@itemx -msched-prefer-non-control-spec-insns
@opindex -mno-sched-prefer-non-control-spec-insns
@opindex -msched-prefer-non-control-spec-insns
-If enabled, control speculative instructions will be choosen for schedule
+If enabled, control speculative instructions will be chosen for schedule
only if there are no other choices at the moment. This will make
the use of the control speculation much more conservative.
The default is 'disable'.
@@ -9862,7 +9862,7 @@ The default is 'disable'.
@itemx -msched-count-spec-in-critical-path
@opindex -mno-sched-count-spec-in-critical-path
@opindex -msched-count-spec-in-critical-path
-If enabled, speculative depedencies will be considered during
+If enabled, speculative dependencies will be considered during
computation of the instructions priorities. This will make the use of the
speculation a bit more conservative.
The default is 'disable'.
Index: doc/passes.texi
===================================================================
--- doc/passes.texi (revision 112778)
+++ doc/passes.texi (working copy)
@@ -202,7 +202,7 @@ declarations of static variables whose l
program. The pass is located in @file{tree-mudflap.c} and is described
by @code{pass_mudflap_1}.
-@item OpenMP lowerering
+@item OpenMP lowering
If OpenMP generation (@option{-fopenmp}) is enabled, this pass lowers
OpenMP constructs into GIMPLE.
Index: doc/tm.texi
===================================================================
--- doc/tm.texi (revision 112778)
+++ doc/tm.texi (working copy)
@@ -4191,7 +4191,7 @@ be because the function prologue moves i
the context of the called function, and @code{0} in the context of
the caller.
-If @var{incoming} is non-zero and the address is to be found on the
+If @var{incoming} is nonzero and the address is to be found on the
stack, return a @code{mem} which refers to the frame pointer. If
@var{incoming} is @code{2}, the result is being used to fetch the
structure value address at the beginning of a function. If you need
@@ -6024,7 +6024,7 @@ This hook is a modified version of @samp
of passing dependence as a second parameter, it passes a type of that
dependence. This is useful to calculate cost of dependence between insns
not having the corresponding link. If @samp{TARGET_SCHED_ADJUST_COST_2} is
-definded it is used instead of @samp{TARGET_SCHED_ADJUST_COST}.
+defined it is used instead of @samp{TARGET_SCHED_ADJUST_COST}.
@end deftypefn
@deftypefn {Target Hook} void TARGET_SCHED_H_I_D_EXTENDED (void)
@@ -6046,7 +6046,7 @@ the generated speculative pattern.
@deftypefn {Target Hook} int TARGET_SCHED_NEEDS_BLOCK_P (rtx @var{insn})
This hook is called by the insn scheduler during generation of recovery code
-for @var{insn}. It should return non-zero, if the corresponding check
+for @var{insn}. It should return nonzero, if the corresponding check
instruction should branch to recovery code, or zero otherwise.
@end deftypefn
@@ -6056,7 +6056,7 @@ check instruction. If @var{mutate_p} is
speculative instruction for which the check should be generated.
@var{label} is either a label of a basic block, where recovery code should
be emitted, or a null pointer, when requested check doesn't branch to
-recovery code (a simple check). If @var{mutate_p} is non-zero, then
+recovery code (a simple check). If @var{mutate_p} is nonzero, then
a pattern for a branchy check corresponding to a simple check denoted by
@var{insn} should be generated. In this case @var{label} can't be null.
@end deftypefn
@@ -6067,7 +6067,7 @@ This hook is used as a workaround for
called on the first instruction of the ready list. The hook is used to
discard speculative instruction that stand first in the ready list from
being scheduled on the current cycle. For non-speculative instructions,
-the hook should always return non-zero. For example, in the ia64 backend
+the hook should always return nonzero. For example, in the ia64 backend
the hook is used to cancel data speculative insns when the ALAT table
is nearly full.
@end deftypefn
@@ -6077,7 +6077,7 @@ This hook is used by the insn scheduler
enabled/used. @var{flags} initially may have either the SCHED_RGN or SCHED_EBB
bit set. This denotes the scheduler pass for which the data should be
provided. The target backend should modify @var{flags} by modifying
-the bits correponding to the following features: USE_DEPS_LIST, USE_GLAT,
+the bits corresponding to the following features: USE_DEPS_LIST, USE_GLAT,
DETACH_LIFE_INFO, and DO_SPECULATION. For the DO_SPECULATION feature
an additional structure @var{spec_info} should be filled by the target.
The structure describes speculation types that can be used in the scheduler.
Index: doc/tree-ssa.texi
===================================================================
--- doc/tree-ssa.texi (revision 112778)
+++ doc/tree-ssa.texi (working copy)
@@ -1392,7 +1392,7 @@ mainly because the set of virtual operan
what some would consider unexpected times. In general, any time you
have modified a statement that has virtual operands, you should verify
whether the list of virtual operands has changed, and if so, mark the
-newly exposed symbols by callnig @code{mark_new_vars_to_rename}.
+newly exposed symbols by calling @code{mark_new_vars_to_rename}.
There is one additional caveat to preserving virtual SSA form. When the
entire set of virtual operands may be eliminated due to better
Index: except.c
===================================================================
--- except.c (revision 112778)
+++ except.c (working copy)
@@ -858,7 +858,7 @@ current_function_has_exception_handlers
}
\f
/* A subroutine of duplicate_eh_regions. Search the region tree under O
- for the miniumum and maximum region numbers. Update *MIN and *MAX. */
+ for the minimum and maximum region numbers. Update *MIN and *MAX. */
static void
duplicate_eh_regions_0 (eh_region o, int *min, int *max)
@@ -912,7 +912,7 @@ duplicate_eh_regions_1 (eh_region old, e
return ret;
}
-/* Duplicate the EH regions of IFUN, rootted at COPY_REGION, into current
+/* Duplicate the EH regions of IFUN, rooted at COPY_REGION, into current
function and root the tree below OUTER_REGION. Remap labels using MAP
callback. The special case of COPY_REGION of 0 means all regions. */
Index: haifa-sched.c
===================================================================
--- haifa-sched.c (revision 112778)
+++ haifa-sched.c (working copy)
@@ -217,7 +217,7 @@ static spec_info_t spec_info;
Used to determine, if we need to fix INSN_TICKs. */
static bool added_recovery_block_p;
-/* Counters of different types of speculative isntructions. */
+/* Counters of different types of speculative instructions. */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
/* Pointers to GLAT data. See init_glat for more information. */
@@ -977,7 +977,7 @@ ready_lastpos (struct ready_list *ready)
}
/* Add an element INSN to the ready list so that it ends up with the
- lowest/highest priority dependending on FIRST_P. */
+ lowest/highest priority depending on FIRST_P. */
HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
@@ -1598,7 +1598,7 @@ find_insn_reg_weight (basic_block bb)
find_insn_reg_weight1 (insn);
}
-/* Calculate INSN_REG_WEIGHT for single insntruction.
+/* Calculate INSN_REG_WEIGHT for single instruction.
Separated from find_insn_reg_weight because of need
to initialize new instruction in generate_recovery_code. */
static void
@@ -2034,7 +2034,7 @@ static int cached_issue_rate = 0;
make this function tries different samples of ready insns. READY
is current queue `ready'. Global array READY_TRY reflects what
insns are already issued in this try. MAX_POINTS is the sum of points
- of all instructions in READY. The function stops immediatelly,
+ of all instructions in READY. The function stops immediately,
if it reached the such a solution, that all instruction can be issued.
INDEX will contain index of the best insn in READY. The following
function is used only for first cycle multipass scheduling. */
@@ -2463,7 +2463,7 @@ schedule_block (basic_block *target_bb,
continue;
}
- /* DECISSION is made. */
+ /* DECISION is made. */
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
@@ -2472,7 +2472,7 @@ schedule_block (basic_block *target_bb,
/* This is used to switch basic blocks by request
from scheduler front-end (actually, sched-ebb.c only).
This is used to process blocks with single fallthru
- edge. If successing block has jump, it [jump] will try
+ edge. If succeeding block has jump, it [jump] will try
move at the end of current bb, thus corrupting CFG. */
|| current_sched_info->advance_target_bb (*target_bb, insn))
{
@@ -2869,7 +2869,7 @@ sched_finish (void)
}
/* Fix INSN_TICKs of the instructions in the current block as well as
- INSN_TICKs of their dependants.
+ INSN_TICKs of their dependents.
HEAD and TAIL are the begin and the end of the current scheduled block. */
static void
fix_inter_tick (rtx head, rtx tail)
@@ -3116,7 +3116,7 @@ fix_tick_ready (rtx next)
tick = INSN_TICK (next);
/* if tick is not equal to INVALID_TICK, then update
INSN_TICK of NEXT with the most recent resolved dependence
- cost. Overwise, recalculate from scratch. */
+ cost. Otherwise, recalculate from scratch. */
full_p = tick == INVALID_TICK;
do
{
@@ -3163,7 +3163,7 @@ change_queue_index (rtx next, int delay)
/* We have nothing to do. */
return;
- /* Remove NEXT from whereever it is now. */
+ /* Remove NEXT from wherever it is now. */
if (i == QUEUE_READY)
ready_remove_insn (next);
else if (i >= 0)
@@ -3318,7 +3318,7 @@ process_insn_depend_be_in_spec (rtx link
{
gcc_assert (!(ds & BE_IN_SPEC));
- if (/* If this dep can be overcomed with 'begin speculation'. */
+ if (/* If this dep can be overcome with 'begin speculation'. */
ds & BEGIN_SPEC)
/* Then we have a choice: keep the dep 'begin speculative'
or transform it into 'be in speculative'. */
@@ -3421,7 +3421,7 @@ add_to_speculative_block (rtx insn)
twins = alloc_INSN_LIST (twin, twins);
- /* Add dependences between TWIN and all apropriate
+ /* Add dependences between TWIN and all appropriate
instructions from REC. */
do
{
@@ -3696,7 +3696,7 @@ create_check_block_twin (rtx insn, bool
gcc_assert (ORIG_PAT (insn));
- /* Initialize TWIN (twin is a dublicate of original instruction
+ /* Initialize TWIN (twin is a duplicate of original instruction
in the recovery block). */
if (rec != EXIT_BLOCK_PTR)
{
@@ -3896,7 +3896,7 @@ create_check_block_twin (rtx insn, bool
add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
if (!mutate_p)
- /* Fix priorities. If MUTATE_P is nonzero, this is not neccessary,
+ /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
because it'll be done later in add_to_speculative_block. */
{
clear_priorities (twin);
@@ -3906,7 +3906,7 @@ create_check_block_twin (rtx insn, bool
/* Removes dependency between instructions in the recovery block REC
and usual region instructions. It keeps inner dependences so it
- won't be neccessary to recompute them. */
+ won't be necessary to recompute them. */
static void
fix_recovery_deps (basic_block rec)
{
@@ -4065,7 +4065,7 @@ dump_new_block_header (int i, basic_bloc
/* Unlink basic block notes and labels and saves them, so they
can be easily restored. We unlink basic block notes in EBB to
- provide back-compatability with the previous code, as target backends
+ provide back-compatibility with the previous code, as target backends
assume, that there'll be only instructions between
current_sched_info->{head and tail}. We restore these notes as soon
as we can.
@@ -4308,8 +4308,8 @@ move_succs (VEC(edge,gc) **succsp, basic
/* Initialize GLAT (global_live_at_{start, end}) structures.
GLAT structures are used to substitute global_live_{start, end}
- regsets during scheduling. This is neccessary to use such functions as
- split_block (), as they assume consistancy of register live information. */
+ regsets during scheduling. This is necessary to use such functions as
+ split_block (), as they assume consistency of register live information. */
static void
init_glat (void)
{
@@ -4530,7 +4530,7 @@ debug_spec_status (ds_t s)
}
/* Helper function for check_cfg.
- Return non-zero, if edge vector pointed to by EL has edge with TYPE in
+ Return nonzero, if edge vector pointed to by EL has edge with TYPE in
its flags. */
static int
has_edge_p (VEC(edge,gc) *el, int type)
@@ -4631,7 +4631,7 @@ check_cfg (rtx head, rtx tail)
gcc_assert (bb == 0);
}
-/* Perform few consistancy checks of flags in different data structures. */
+/* Perform a few consistency checks of flags in different data structures. */
static void
check_sched_flags (void)
{
@@ -4650,7 +4650,7 @@ check_sched_flags (void)
/* Check global_live_at_{start, end} regsets.
If FATAL_P is TRUE, then abort execution at the first failure.
- Overwise, print diagnostics to STDERR (this mode is for calling
+ Otherwise, print diagnostics to STDERR (this mode is for calling
from debugger). */
void
check_reg_live (bool fatal_p)
Index: optabs.c
===================================================================
--- optabs.c (revision 112778)
+++ optabs.c (working copy)
@@ -3710,7 +3710,7 @@ prepare_cmp_insn (rtx *px, rtx *py, enum
to the modified comparison. For signed comparisons compare the
result against 1 in the biased case, and zero in the unbiased
case. For unsigned comparisons always compare against 1 after
- biasing the unbased result by adding 1. This gives us a way to
+ biasing the unbiased result by adding 1. This gives us a way to
represent LTU. */
*px = result;
*pmode = word_mode;
Index: rtl.h
===================================================================
--- rtl.h (revision 112778)
+++ rtl.h (working copy)
@@ -361,7 +361,7 @@ struct rtvec_def GTY(()) {
/* Predicate yielding nonzero iff X is an rtx for a memory location. */
#define MEM_P(X) (GET_CODE (X) == MEM)
-/* Prediacte yielding nonzero iff X is an rtx for a constant integer. */
+/* Predicate yielding nonzero iff X is an rtx for a constant integer. */
#define CONST_INT_P(X) (GET_CODE (X) == CONST_INT)
/* Predicate yielding nonzero iff X is a label insn. */
Index: sched-deps.c
===================================================================
--- sched-deps.c (revision 112778)
+++ sched-deps.c (working copy)
@@ -227,7 +227,7 @@ sched_insns_conditions_mutex_p (rtx insn
\f
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
LOG_LINKS of INSN, if it is not already there. DEP_TYPE indicates the
- type of dependence that this link represents. DS, if non-zero,
+ type of dependence that this link represents. DS, if nonzero,
indicates speculations, through which this dependence can be overcome.
MEM1 and MEM2, if non-null, corresponds to memory locations in case of
data speculation. The function returns a value indicating if an old entry
@@ -2187,7 +2187,7 @@ check_dep_status (enum reg_note dt, ds_t
gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
}
- /* Check that true and anti depencies can't have other speculative
+ /* Check that true and anti dependencies can't have other speculative
statuses. */
if (ds & DEP_TRUE)
gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
Index: sched-int.h
===================================================================
--- sched-int.h (revision 112778)
+++ sched-int.h (working copy)
@@ -321,7 +321,7 @@ struct haifa_insn_data
(e.g. add_dependence was invoked with (insn == elem)). */
unsigned int has_internal_dep : 1;
- /* What speculations are neccessary to apply to schedule the instruction. */
+ /* What speculations are necessary to apply to schedule the instruction. */
ds_t todo_spec;
/* What speculations were already applied. */
ds_t done_spec;
@@ -359,7 +359,7 @@ extern regset *glat_start, *glat_end;
#define RECOVERY_BLOCK(INSN) (h_i_d[INSN_UID (INSN)].recovery_block)
#define ORIG_PAT(INSN) (h_i_d[INSN_UID (INSN)].orig_pat)
-/* DEP_STATUS of the link incapsulates information, that is needed for
+/* DEP_STATUS of the link encapsulates information, that is needed for
speculative scheduling. Namely, it is 4 integers in the range
[0, MAX_DEP_WEAK] and 3 bits.
The integers correspond to the probability of the dependence to *not*
@@ -374,7 +374,7 @@ extern regset *glat_start, *glat_end;
to know just the major type of all the dependence between two instructions,
as only true dependence can be overcome.
There also is the 4-th bit in the DEP_STATUS (HARD_DEP), that is reserved
- for using to describe instruction's status. It is set whenever instuction
+ for using to describe instruction's status. It is set whenever instruction
has at least one dependence, that cannot be overcome.
See also: check_dep_status () in sched-deps.c . */
#define DEP_STATUS(LINK) XINT (LINK, 2)
@@ -421,27 +421,27 @@ enum SPEC_TYPES_OFFSETS {
/* The following defines provide numerous constants used to distinguish between
different types of speculative dependencies. */
-/* Dependence can be overcomed with generation of new data speculative
+/* Dependence can be overcome with generation of new data speculative
instruction. */
#define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)
/* This dependence is to the instruction in the recovery block, that was
formed to recover after data-speculation failure.
- Thus, this dependence can overcomed with generating of the copy of
+ Thus, this dependence can be overcome with generating of the copy of
this instruction in the recovery block. */
#define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)
-/* Dependence can be overcomed with generation of new control speculative
+/* Dependence can be overcome with generation of new control speculative
instruction. */
#define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)
/* This dependence is to the instruction in the recovery block, that was
formed to recover after control-speculation failure.
- Thus, this dependence can overcomed with generating of the copy of
+ Thus, this dependence can be overcome with generating of the copy of
this instruction in the recovery block. */
#define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)
-/* Few convinient combinations. */
+/* A few convenient combinations. */
#define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
#define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
#define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
Index: sched-rgn.c
===================================================================
--- sched-rgn.c (revision 112778)
+++ sched-rgn.c (working copy)
@@ -1146,7 +1146,7 @@ extend_rgns (int *degree, int *idxp, sbi
(We don't count single block regions here).
By default we do at most 2 iterations.
- This can be overriden with max-sched-extend-regions-iters parameter:
+ This can be overridden with max-sched-extend-regions-iters parameter:
0 - disable region extension,
N > 0 - do at most N iterations. */
@@ -2038,7 +2038,7 @@ can_schedule_ready_p (rtx insn)
return 1;
}
-/* Updates counter and other information. Splitted from can_schedule_ready_p ()
+/* Updates counter and other information. Split from can_schedule_ready_p ()
because when we schedule insn speculatively then insn passed to
can_schedule_ready_p () differs from the one passed to
begin_schedule_ready (). */
@@ -2752,7 +2752,7 @@ schedule_region (int rgn)
compute_dom_prob_ps (bb);
/* Cleanup ->aux used for EDGE_TO_BIT mapping. */
- /* We don't need them anymore. But we want to avoid dublication of
+ /* We don't need them anymore. But we want to avoid duplication of
aux fields in the newly created edges. */
FOR_EACH_BB (block)
{
@@ -2952,7 +2952,7 @@ schedule_insns (void)
init_regions ();
- /* EBB_HEAD is a region-scope sctructure. But we realloc it for
+ /* EBB_HEAD is a region-scope structure. But we realloc it for
each region to save time/memory/something else. */
ebb_head = 0;
@@ -2996,7 +2996,7 @@ schedule_insns (void)
liveness. */
for (rgn = 0; rgn < nr_regions; rgn++)
if (RGN_NR_BLOCKS (rgn) > 1
- /* Or the only block of this region has been splitted. */
+ /* Or the only block of this region has been split. */
|| RGN_HAS_REAL_EBB (rgn)
/* New blocks (e.g. recovery blocks) should be processed
as parts of large regions. */
@@ -3159,7 +3159,7 @@ add_block1 (basic_block bb, basic_block
/* ebb_head[i] - VALID. */
/* Source position: ebb_head[i]
- Destination posistion: ebb_head[i] + 1
+ Destination position: ebb_head[i] + 1
Last position:
RGN_BLOCKS (nr_regions) - 1
Number of elements to copy: (last_position) - (source_position) + 1
Index: tree-inline.h
===================================================================
--- tree-inline.h (revision 112778)
+++ tree-inline.h (working copy)
@@ -64,10 +64,10 @@ typedef struct copy_body_data
int eh_region_offset;
/* We use the same mechanism do all sorts of different things. Rather
- than enumerating the different cases, we categorize the behaviour
+ than enumerating the different cases, we categorize the behavior
in the various situations. */
- /* Indicate the desired behaviour wrt call graph edges. We can either
+ /* Indicate the desired behavior wrt call graph edges. We can either
duplicate the edge (inlining, cloning), move the edge (versioning,
parallelization), or move the edges of the clones (saving). */
enum copy_body_cge_which {
Index: tree-ssa-dom.c
===================================================================
--- tree-ssa-dom.c (revision 112778)
+++ tree-ssa-dom.c (working copy)
@@ -2328,7 +2328,7 @@ propagate_rhs_into_lhs (tree stmt, tree
}
}
-/* T is either a PHI node (potentally a degenerate PHI node) or
+/* T is either a PHI node (potentially a degenerate PHI node) or
a statement that is a trivial copy or constant initialization.
Attempt to eliminate T by propagating its RHS into all uses of
@@ -2446,7 +2446,7 @@ eliminate_degenerate_phis (void)
time behavior with bitmaps rather than sbitmaps. */
interesting_names = BITMAP_ALLOC (NULL);
- /* First phase. Elimiante degenerate PHIs via a domiantor
+ /* First phase. Eliminate degenerate PHIs via a dominator
walk of the CFG.
Experiments have indicated that we generally get better
@@ -2457,7 +2457,7 @@ eliminate_degenerate_phis (void)
calculate_dominance_info (CDI_DOMINATORS);
eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
- /* Second phase. Eliminate second order degnerate PHIs as well
+ /* Second phase. Eliminate second order degenerate PHIs as well
as trivial copies or constant initializations identified by
the first phase or this phase. Basically we keep iterating
until our set of INTERESTING_NAMEs is empty. */
Index: tree-ssa-loop-prefetch.c
===================================================================
--- tree-ssa-loop-prefetch.c (revision 112778)
+++ tree-ssa-loop-prefetch.c (working copy)
@@ -810,7 +810,7 @@ anything_to_prefetch_p (struct mem_ref_g
/* Issue prefetches for the reference REF into loop as decided before.
HEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR
- is the factor by thet LOOP was unrolled. */
+ is the factor by which LOOP was unrolled. */
static void
issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
Index: tree-ssa-operands.c
===================================================================
--- tree-ssa-operands.c (revision 112778)
+++ tree-ssa-operands.c (working copy)
@@ -412,7 +412,7 @@ set_virtual_use_link (use_operand_p ptr,
} while (0)
/* Initializes immediate use at USE_PTR to value VAL, and links it to the list
- of immeditate uses. STMT is the current statement. */
+ of immediate uses. STMT is the current statement. */
#define INITIALIZE_USE(USE_PTR, VAL, STMT) \
do \
@@ -492,7 +492,7 @@ add_mustdef_op (tree stmt, tree op, must
}
/* Takes elements from build_defs and turns them into def operands of STMT.
- TODO -- Given that def operands list is not neccessarily sorted, merging
+ TODO -- Given that def operands list is not necessarily sorted, merging
the operands this way does not make much sense.
-- Make build_defs VEC of tree *. */
@@ -579,7 +579,7 @@ finalize_ssa_defs (tree stmt)
}
/* Takes elements from build_uses and turns them into use operands of STMT.
- TODO -- Given that use operands list is not neccessarily sorted, merging
+ TODO -- Given that use operands list is not necessarily sorted, merging
the operands this way does not make much sense.
-- Make build_uses VEC of tree *. */
Index: tree-vect-patterns.c
===================================================================
--- tree-vect-patterns.c (revision 112778)
+++ tree-vect-patterns.c (working copy)
@@ -357,7 +357,7 @@ vect_recog_widen_mult_pattern (tree last
where type 'TYPE' is at least double the size of type 'type', i.e - we're
summing elements of type 'type' into an accumulator of type 'TYPE'. This is
- a sepcial case of a reduction computation.
+ a special case of a reduction computation.
Input:
Index: tree-vrp.c
===================================================================
--- tree-vrp.c (revision 112778)
+++ tree-vrp.c (working copy)
@@ -1116,7 +1116,7 @@ extract_range_from_assert (value_range_t
}
/* Case 3a, the anti-range extends into the low
part of the real range. Thus creating a new
- low for the real reange. */
+ low for the real range. */
else if ((compare_values (anti_max, real_min) == 1
|| compare_values (anti_max, real_min) == 0)
&& compare_values (anti_max, real_max) == -1)
@@ -1129,7 +1129,7 @@ extract_range_from_assert (value_range_t
}
/* Case 3b, the anti-range extends into the high
part of the real range. Thus creating a new
- higher for the real reange. */
+ higher for the real range. */
else if (compare_values (anti_min, real_min) == 1
&& (compare_values (anti_min, real_max) == -1
|| compare_values (anti_min, real_max) == 0))
@@ -2981,7 +2981,7 @@ find_assert_locations (basic_block bb)
it, create a new assertion location node for OP. */
if (infer_value_range (stmt, op, &comp_code, &value))
{
- /* If we are able to infer a non-zero value range for OP,
+ /* If we are able to infer a nonzero value range for OP,
then walk backwards through the use-def chain to see if OP
was set via a typecast.
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-12-16 6:09 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-12-16 6:09 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-12-16 Kazu Hirata <kazu@codesourcery.com>
* basic-block.h, config/i386/winnt.c, config/pa/pa.c,
config/s390/s390.c, dfp.c, expr.c, fold-const.c, params.def,
reload.c, struct-equiv.c, tree-ssa-ccp.c, tree-ssa-pre.c,
tree-ssa-reassoc.c, tree-ssa-structalias.c: Fix comment typos.
* doc/invoke.texi, doc/tm.texi: Fix typos.
Index: basic-block.h
===================================================================
--- basic-block.h (revision 108625)
+++ basic-block.h (working copy)
@@ -1115,7 +1115,7 @@ struct equiv_info
NEED_RERUN is set. This has to be tested by the caller to re-run
the comparison if the match appears otherwise sound. The state kept in
x_start, y_start, equiv_used and check_input_conflict ensures that
- we won't loop indefinetly. */
+ we won't loop indefinitely. */
bool need_rerun;
/* If there is indication of an input conflict at the end,
CHECK_INPUT_CONFLICT is set so that we'll check for input conflicts
@@ -1156,7 +1156,7 @@ struct equiv_info
that are being compared. A final jump insn will not be included. */
rtx x_end, y_end;
- /* If we are matching tablejumps, X_LABEL in X_BLOCK coresponds to
+ /* If we are matching tablejumps, X_LABEL in X_BLOCK corresponds to
Y_LABEL in Y_BLOCK. */
rtx x_label, y_label;
Index: config/i386/winnt.c
===================================================================
--- config/i386/winnt.c (revision 108625)
+++ config/i386/winnt.c (working copy)
@@ -155,7 +155,7 @@ i386_pe_dllimport_p (tree decl)
/* The DECL_DLLIMPORT_P flag was set for decls in the class definition
by targetm.cxx.adjust_class_at_definition. Check again to emit
- warnings if the class attribute has been overriden by an
+ warnings if the class attribute has been overridden by an
out-of-class definition. */
if (associated_type (decl)
&& lookup_attribute ("dllimport",
Index: config/pa/pa.c
===================================================================
--- config/pa/pa.c (revision 108625)
+++ config/pa/pa.c (working copy)
@@ -5304,7 +5304,7 @@ output_deferred_plabels (void)
/* If we have some deferred plabels, then we need to switch into the
data or readonly data section, and align it to a 4 byte boundary
- before outputing the deferred plabels. */
+ before outputting the deferred plabels. */
if (n_deferred_plabels)
{
switch_to_section (flag_pic ? data_section : readonly_data_section);
Index: config/s390/s390.c
===================================================================
--- config/s390/s390.c (revision 108625)
+++ config/s390/s390.c (working copy)
@@ -3998,7 +3998,7 @@ struct alignment_context
rtx shift; /* Bit offset with regard to lsb. */
rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
rtx modemaski; /* ~modemask */
- bool aligned; /* True if memory is aliged, false else. */
+ bool aligned; /* True if memory is aligned, false else. */
};
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
Index: dfp.c
===================================================================
--- dfp.c (revision 108625)
+++ dfp.c (working copy)
@@ -333,7 +333,7 @@ decimal_from_binary (REAL_VALUE_TYPE *to
}
/* Helper function to real.c:do_compare() to handle decimal internal
- represenation including when one of the operands is still in the
+ representation including when one of the operands is still in the
binary internal representation. */
int
Index: doc/invoke.texi
===================================================================
--- doc/invoke.texi (revision 108625)
+++ doc/invoke.texi (working copy)
@@ -5853,7 +5853,7 @@ A, the growth of unit is 300\% and yet s
large units consisting of small inlininable functions however the overall unit
growth limit is needed to avoid exponential explosion of code size. Thus for
smaller units, the size is increased to @option{--param large-unit-insns}
-before aplying @option{--param inline-unit-growth}. The default is 10000
+before applying @option{--param inline-unit-growth}. The default is 10000
@item inline-unit-growth
Specifies maximal overall growth of the compilation unit caused by inlining.
Index: doc/tm.texi
===================================================================
--- doc/tm.texi (revision 108625)
+++ doc/tm.texi (working copy)
@@ -3032,7 +3032,7 @@ The final value should conincide with th
Normally the CFA is calculated as an offset from the argument pointer,
via @code{ARG_POINTER_CFA_OFFSET}, but if the argument pointer is
variable due to the ABI, this may not be possible. If this macro is
-defined, it imples that the virtual register instantiation should be
+defined, it implies that the virtual register instantiation should be
based on the frame pointer instead of the argument pointer. Only one
of @code{FRAME_POINTER_CFA_OFFSET} and @code{ARG_POINTER_CFA_OFFSET}
should be defined.
Index: expr.c
===================================================================
--- expr.c (revision 108625)
+++ expr.c (working copy)
@@ -8090,7 +8090,7 @@ expand_expr_real_1 (tree exp, rtx target
else
comparison_code = unsignedp ? LEU : LE;
- /* Canonicalize to comparsions against 0. */
+ /* Canonicalize to comparisons against 0. */
if (op1 == const1_rtx)
{
/* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
Index: fold-const.c
===================================================================
--- fold-const.c (revision 108625)
+++ fold-const.c (working copy)
@@ -6809,7 +6809,7 @@ fold_unary (enum tree_code code, tree ty
if (TREE_TYPE (op0) == type)
return op0;
- /* If we have (type) (a CMP b) and type is an integal type, return
+ /* If we have (type) (a CMP b) and type is an integral type, return
new expression involving the new type. */
if (COMPARISON_CLASS_P (op0) && INTEGRAL_TYPE_P (type))
return fold_build2 (TREE_CODE (op0), type, TREE_OPERAND (op0, 0),
@@ -8408,7 +8408,7 @@ fold_binary (enum tree_code code, tree t
}
/* Optimize tan(x)/sin(x) as 1.0/cos(x) if we don't care about
- NaNs or Infintes. */
+ NaNs or Infinities. */
if (((fcode0 == BUILT_IN_TAN && fcode1 == BUILT_IN_SIN)
|| (fcode0 == BUILT_IN_TANF && fcode1 == BUILT_IN_SINF)
|| (fcode0 == BUILT_IN_TANL && fcode1 == BUILT_IN_SINL)))
Index: params.def
===================================================================
--- params.def (revision 108625)
+++ params.def (working copy)
@@ -321,7 +321,7 @@ DEFPARAM(HOT_BB_FREQUENCY_FRACTION,
the other loops cold that is not usually the case. So we need to artificially
flatten the profile.
- We need to cut the maximal predicted iterations to large enought iterations
+ We need to cut the maximal predicted iterations to large enough iterations
so the loop appears important, but safely within HOT_BB_COUNT_FRACTION
range. */
Index: reload.c
===================================================================
--- reload.c (revision 108625)
+++ reload.c (working copy)
@@ -366,7 +366,7 @@ push_secondary_reload (int in_p, rtx x,
gcc_assert (insn_data[(int) icode].n_operands == 3);
/* ??? We currently have no way to represent a reload that needs
- an icode to reload from an intermediate tertiaty reload register.
+ an icode to reload from an intermediate tertiary reload register.
We should probably have a new field in struct reload to tag a
chain of scratch operand reloads onto. */
gcc_assert (class == NO_REGS);
Index: struct-equiv.c
===================================================================
--- struct-equiv.c (revision 108625)
+++ struct-equiv.c (working copy)
@@ -54,7 +54,7 @@ Software Foundation, 51 Franklin Street,
the number of inputs an miss an input conflict. Sufficient information
is gathered so that when we make another pass, we won't have to backtrack
at the same point.
- Another issue is that information in memory atttributes and/or REG_NOTES
+ Another issue is that information in memory attributes and/or REG_NOTES
might have to be merged or discarded to make a valid match. We don't want
to discard such information when we are not certain that we want to merge
the two (partial) blocks.
@@ -99,7 +99,7 @@ static bool resolve_input_conflict (stru
SECONDARY_MEMORY_NEEDED, cannot be done directly. For our purposes, we
consider them impossible to generate after reload (even though some
might be synthesized when you throw enough code at them).
- Since we don't know while procesing a cross-jump if a local register
+ Since we don't know while processing a cross-jump if a local register
that is currently live will eventually be live and thus be an input,
we keep track of potential inputs that would require an impossible move
by using a prohibitively high cost for them.
@@ -201,7 +201,7 @@ merge_memattrs (rtx x, rtx y)
}
/* In SET, assign the bit for the register number of REG the value VALUE.
- If REG is a hard register, do so for all its consituent registers.
+ If REG is a hard register, do so for all its constituent registers.
Return the number of registers that have become included (as a positive
number) or excluded (as a negative number). */
static int
@@ -1128,7 +1128,7 @@ struct_equiv_block_eq (int mode, struct
if (mode & STRUCT_EQUIV_MATCH_JUMPS)
{
- /* The caller is expected to have comapred the jumps already, but we
+ /* The caller is expected to have compared the jumps already, but we
need to match them again to get any local registers and inputs. */
gcc_assert (!info->cur.x_start == !info->cur.y_start);
if (info->cur.x_start)
Index: tree-ssa-ccp.c
===================================================================
--- tree-ssa-ccp.c (revision 108625)
+++ tree-ssa-ccp.c (working copy)
@@ -276,7 +276,7 @@ debug_lattice_value (prop_value_t val)
/* The regular is_gimple_min_invariant does a shallow test of the object.
It assumes that full gimplification has happened, or will happen on the
object. For a value coming from DECL_INITIAL, this is not true, so we
- have to be more strict outselves. */
+ have to be more strict ourselves. */
static bool
ccp_decl_initial_min_invariant (tree t)
Index: tree-ssa-pre.c
===================================================================
--- tree-ssa-pre.c (revision 108625)
+++ tree-ssa-pre.c (working copy)
@@ -555,7 +555,7 @@ bitmap_set_copy (bitmap_set_t dest, bitm
bitmap_copy (dest->values, orig->values);
}
-/* Perform bitmapped set rperation DEST &= ORIG. */
+/* Perform bitmapped set operation DEST &= ORIG. */
static void
bitmap_set_and (bitmap_set_t dest, bitmap_set_t orig)
Index: tree-ssa-reassoc.c
===================================================================
--- tree-ssa-reassoc.c (revision 108625)
+++ tree-ssa-reassoc.c (working copy)
@@ -69,7 +69,7 @@ Boston, MA 02110-1301, USA. */
In order to promote the most redundancy elimination, you want
binary expressions whose operands are the same rank (or
- preferrably, the same value) exposed to the redundancy eliminator,
+ preferably, the same value) exposed to the redundancy eliminator,
for possible elimination.
So the way to do this if we really cared, is to build the new op
Index: tree-ssa-structalias.c
===================================================================
--- tree-ssa-structalias.c (revision 108625)
+++ tree-ssa-structalias.c (working copy)
@@ -3294,7 +3294,7 @@ find_func_aliases (tree origt)
gcc_assert (found);
}
- /* Assign all the passed arguments to the approriate incoming
+ /* Assign all the passed arguments to the appropriate incoming
parameters of the function. */
fi = get_varinfo (varid);
arglist = TREE_OPERAND (rhsop, 1);
@@ -3683,7 +3683,7 @@ create_function_info_for (tree decl, con
arg = DECL_ARGUMENTS (decl);
- /* Set up varirables for each argument. */
+ /* Set up variables for each argument. */
for (i = 1; i < vi->fullsize; i++)
{
varinfo_t argvi;
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-08-01 3:56 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-08-01 3:56 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-08-01 Kazu Hirata <kazu@codesourcery.com>
* dwarf2out.c, fold-const.c, ipa-type-escape.c,
loop-invariant.c, predict.c, predict.def, reload1.c, reorg.c,
tree-sra.c, config/arm/arm.c, config/crx/crx.c,
config/i386/i386.c, config/mips/mips.h,
config/rs6000/rs6000.h, config/sh/sh.c,
config/stormy16/stormy16.c: Fix comment typos.
Index: dwarf2out.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/dwarf2out.c,v
retrieving revision 1.608
diff -u -d -p -r1.608 dwarf2out.c
--- dwarf2out.c 26 Jul 2005 02:56:42 -0000 1.608
+++ dwarf2out.c 1 Aug 2005 03:48:52 -0000
@@ -13078,7 +13078,7 @@ dwarf2out_decl (tree decl)
declarations. We have to check DECL_INITIAL instead. That's because
the C front-end supports some weird semantics for "extern inline"
function definitions. These can get inlined within the current
- translation unit (an thus, we need to generate Dwarf info for their
+ translation unit (and thus, we need to generate Dwarf info for their
abstract instances so that the Dwarf info for the concrete inlined
instances can have something to refer to) but the compiler never
generates any out-of-lines instances of such things (despite the fact
Index: fold-const.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/fold-const.c,v
retrieving revision 1.612
diff -u -d -p -r1.612 fold-const.c
--- fold-const.c 30 Jul 2005 14:39:07 -0000 1.612
+++ fold-const.c 1 Aug 2005 03:48:53 -0000
@@ -2029,7 +2029,7 @@ fold_convert (tree type, tree arg)
}
}
\f
-/* Return false if expr can be assumed not to be an value, true
+/* Return false if expr can be assumed not to be an lvalue, true
otherwise. */
static bool
Index: ipa-type-escape.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/ipa-type-escape.c,v
retrieving revision 2.3
diff -u -d -p -r2.3 ipa-type-escape.c
--- ipa-type-escape.c 26 Jul 2005 13:53:51 -0000 2.3
+++ ipa-type-escape.c 1 Aug 2005 03:48:53 -0000
@@ -1622,7 +1622,7 @@ close_type_full_escape (tree type)
}
/* Transitively close the addressof bitmap for the type with UID.
- This means that if we had a.b and b.c, a would have both b an c in
+ This means that if we had a.b and b.c, a would have both b and c in
its maps. */
static bitmap
Index: loop-invariant.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/loop-invariant.c,v
retrieving revision 2.15
diff -u -d -p -r2.15 loop-invariant.c
--- loop-invariant.c 25 Jun 2005 02:00:35 -0000 2.15
+++ loop-invariant.c 1 Aug 2005 03:48:53 -0000
@@ -470,7 +470,7 @@ find_invariant_insn (rtx insn, bool alwa
create_new_invariant (def, insn, depends_on, always_executed);
}
-/* Record registers used in INSN that have an unique invariant definition.
+/* Record registers used in INSN that have a unique invariant definition.
DF is the dataflow object. */
static void
Index: predict.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/predict.c,v
retrieving revision 1.154
diff -u -d -p -r1.154 predict.c
--- predict.c 21 Jul 2005 07:24:07 -0000 1.154
+++ predict.c 1 Aug 2005 03:48:53 -0000
@@ -1557,11 +1557,11 @@ typedef struct block_info_def
/* Similar information for edges. */
typedef struct edge_info_def
{
- /* In case edge is an loopback edge, the probability edge will be reached
+ /* In case edge is a loopback edge, the probability edge will be reached
in case header is. Estimated number of iterations of the loop can be
then computed as 1 / (1 - back_edge_prob). */
sreal back_edge_prob;
- /* True if the edge is an loopback edge in the natural loop. */
+ /* True if the edge is a loopback edge in the natural loop. */
unsigned int back_edge:1;
} *edge_info;
Index: predict.def
===================================================================
RCS file: /cvs/gcc/gcc/gcc/predict.def,v
retrieving revision 1.22
diff -u -d -p -r1.22 predict.def
--- predict.def 25 Jun 2005 02:00:44 -0000 1.22
+++ predict.def 1 Aug 2005 03:48:53 -0000
@@ -41,7 +41,7 @@ DEF_PREDICTOR (PRED_COMBINED, "combined"
/* An outcome estimated by Dempster-Shaffer theory. */
DEF_PREDICTOR (PRED_DS_THEORY, "DS theory", PROB_ALWAYS, 0)
-/* An combined heuristics using probability determined by first
+/* A combined heuristics using probability determined by first
matching heuristics from this list. */
DEF_PREDICTOR (PRED_FIRST_MATCH, "first match", PROB_ALWAYS, 0)
Index: reload1.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/reload1.c,v
retrieving revision 1.477
diff -u -d -p -r1.477 reload1.c
--- reload1.c 25 Jun 2005 02:00:54 -0000 1.477
+++ reload1.c 1 Aug 2005 03:48:54 -0000
@@ -1042,7 +1042,7 @@ reload (rtx first, int global)
/* If we already deleted the insn or if it may trap, we can't
delete it. The latter case shouldn't happen, but can
if an insn has a variable address, gets a REG_EH_REGION
- note added to it, and then gets converted into an load
+ note added to it, and then gets converted into a load
from a constant address. */
if (NOTE_P (equiv_insn)
|| can_throw_internal (equiv_insn))
Index: reorg.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/reorg.c,v
retrieving revision 1.109
diff -u -d -p -r1.109 reorg.c
--- reorg.c 5 Jul 2005 16:20:14 -0000 1.109
+++ reorg.c 1 Aug 2005 03:48:58 -0000
@@ -1934,7 +1934,7 @@ reorg_redirect_jump (rtx jump, rtx nlabe
that reference values used in INSN. If we find one, then we move the
REG_DEAD note to INSN.
- This is needed to handle the case where an later insn (after INSN) has a
+ This is needed to handle the case where a later insn (after INSN) has a
REG_DEAD note for a register used by INSN, and this later insn subsequently
gets moved before a CODE_LABEL because it is a redundant insn. In this
case, mark_target_live_regs may be confused into thinking the register
Index: tree-sra.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-sra.c,v
retrieving revision 2.68
diff -u -d -p -r2.68 tree-sra.c
--- tree-sra.c 20 Jul 2005 01:18:23 -0000 2.68
+++ tree-sra.c 1 Aug 2005 03:48:58 -0000
@@ -724,7 +724,7 @@ sra_walk_expr (tree *expr_p, block_stmt_
goto use_all;
case ARRAY_RANGE_REF:
- /* Similarly, an subrange reference is used to modify indexing. Which
+ /* Similarly, a subrange reference is used to modify indexing. Which
means that the canonical element names that we have won't work. */
goto use_all;
Index: config/arm/arm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/arm/arm.c,v
retrieving revision 1.470
diff -u -d -p -r1.470 arm.c
--- config/arm/arm.c 30 Jul 2005 00:16:35 -0000 1.470
+++ config/arm/arm.c 1 Aug 2005 03:48:59 -0000
@@ -14738,7 +14738,7 @@ arm_unwind_emit_stm (FILE * asm_out_file
if (reg < 16)
{
/* The function prologue may also push pc, but not annotate it as it is
- never restored. We turn this into an stack pointer adjustment. */
+ never restored. We turn this into a stack pointer adjustment. */
if (nregs * 4 == offset - 4)
{
fprintf (asm_out_file, "\t.pad #4\n");
Index: config/crx/crx.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/crx/crx.c,v
retrieving revision 1.3
diff -u -d -p -r1.3 crx.c
--- config/crx/crx.c 26 Jul 2005 13:53:52 -0000 1.3
+++ config/crx/crx.c 1 Aug 2005 03:48:59 -0000
@@ -80,7 +80,7 @@
((GET_CODE(X) == CONST_INT \
&& SIGNED_INT_FITS_N_BITS(INTVAL(X),n)) ? 1 : 0)
-/* Nonzero if the rtx X is a unsigned const int of n bits */
+/* Nonzero if the rtx X is an unsigned const int of n bits. */
#define RTX_UNSIGNED_INT_FITS_N_BITS(X,n) \
((GET_CODE(X) == CONST_INT \
&& UNSIGNED_INT_FITS_N_BITS(INTVAL(X),n)) ? 1 : 0)
Index: config/i386/i386.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.c,v
retrieving revision 1.846
diff -u -d -p -r1.846 i386.c
--- config/i386/i386.c 31 Jul 2005 09:12:28 -0000 1.846
+++ config/i386/i386.c 1 Aug 2005 03:49:00 -0000
@@ -1791,7 +1791,7 @@ x86_64_elf_unique_section (tree decl, in
/* This says how to output assembler code to declare an
uninitialized external linkage data object.
- For medim model x86-64 we need to use .largecomm opcode for
+ For medium model x86-64 we need to use .largecomm opcode for
large objects. */
void
x86_elf_aligned_common (FILE *file,
@@ -5789,7 +5789,7 @@ legitimate_address_p (enum machine_mode
return FALSE;
}
\f
-/* Return an unique alias set for the GOT. */
+/* Return a unique alias set for the GOT. */
static HOST_WIDE_INT
ix86_GOT_alias_set (void)
Index: config/mips/mips.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/mips/mips.h,v
retrieving revision 1.401
diff -u -d -p -r1.401 mips.h
--- config/mips/mips.h 29 Jul 2005 17:25:24 -0000 1.401
+++ config/mips/mips.h 1 Aug 2005 03:49:00 -0000
@@ -1663,7 +1663,7 @@ extern enum reg_class mips_char_to_class
#define REG_CLASS_FROM_LETTER(C) mips_char_to_class[(unsigned char)(C)]
-/* True if VALUE is a unsigned 6-bit number. */
+/* True if VALUE is an unsigned 6-bit number. */
#define UIMM6_OPERAND(VALUE) \
(((VALUE) & ~(unsigned HOST_WIDE_INT) 0x3f) == 0)
Index: config/rs6000/rs6000.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/rs6000.h,v
retrieving revision 1.379
diff -u -d -p -r1.379 rs6000.h
--- config/rs6000/rs6000.h 21 Jul 2005 07:30:00 -0000 1.379
+++ config/rs6000/rs6000.h 1 Aug 2005 03:49:00 -0000
@@ -1109,7 +1109,7 @@ enum reg_class
'T' is a constant that can be placed into a 32-bit mask operand
'U' is for V.4 small data references.
'W' is a vector constant that can be easily generated (no mem refs).
- 'Y' is a indexed or word-aligned displacement memory operand.
+ 'Y' is an indexed or word-aligned displacement memory operand.
'Z' is an indexed or indirect memory operand.
't' is for AND masks that can be performed by two rldic{l,r} insns. */
Index: config/sh/sh.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/sh/sh.c,v
retrieving revision 1.339
diff -u -d -p -r1.339 sh.c
--- config/sh/sh.c 20 Jul 2005 05:03:21 -0000 1.339
+++ config/sh/sh.c 1 Aug 2005 03:49:01 -0000
@@ -1755,7 +1755,7 @@ unspec_caller_rtx_p (rtx pat)
}
/* Indicate that INSN cannot be duplicated. This is true for insn
- that generates an unique label. */
+ that generates a unique label. */
static bool
sh_cannot_copy_insn_p (rtx insn)
Index: config/stormy16/stormy16.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/stormy16/stormy16.c,v
retrieving revision 1.80
diff -u -d -p -r1.80 stormy16.c
--- config/stormy16/stormy16.c 12 Jul 2005 03:48:20 -0000 1.80
+++ config/stormy16/stormy16.c 1 Aug 2005 03:49:01 -0000
@@ -2479,7 +2479,7 @@ combine_bnp (rtx insn)
if (need_extend)
{
- /* LT and GE conditionals should have an sign extend before
+ /* LT and GE conditionals should have a sign extend before
them. */
for (and = prev_real_insn (insn); and; and = prev_real_insn (and))
{
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-07-29 14:52 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-07-29 14:52 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-07-29 Kazu Hirata <kazu@codesourcery.com>
* cfg.c, tree-complex.c, config/frv/frv.c, config/i386/i386.c:
Fix comment typos.
Index: cfg.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cfg.c,v
retrieving revision 1.102
diff -u -d -p -r1.102 cfg.c
--- cfg.c 28 Jul 2005 07:41:21 -0000 1.102
+++ cfg.c 29 Jul 2005 14:48:03 -0000
@@ -926,7 +926,7 @@ scale_bbs_frequencies_int (basic_block *
if (num > den)
return;
/* Assume that the users are producing the fraction from frequencies
- that never grow far enought to risk arithmetic overflow. */
+ that never grow far enough to risk arithmetic overflow. */
gcc_assert (num < 65536);
for (i = 0; i < nbbs; i++)
{
Index: tree-complex.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-complex.c,v
retrieving revision 2.40
diff -u -d -p -r2.40 tree-complex.c
--- tree-complex.c 26 Jul 2005 21:38:45 -0000 2.40
+++ tree-complex.c 29 Jul 2005 14:48:04 -0000
@@ -502,7 +502,7 @@ set_component_ssa_name (tree ssa_name, b
;
/* If we've nothing assigned, and the value we're given is already stable,
- then install that as the value for this SSA_NAME. This pre-emptively
+ then install that as the value for this SSA_NAME. This preemptively
copy-propagates the value, which avoids unnecessary memory allocation. */
else if (is_gimple_min_invariant (value))
{
Index: config/frv/frv.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/frv/frv.c,v
retrieving revision 1.96
diff -u -d -p -r1.96 frv.c
--- config/frv/frv.c 28 Jul 2005 02:03:36 -0000 1.96
+++ config/frv/frv.c 29 Jul 2005 14:48:06 -0000
@@ -7838,7 +7838,7 @@ frv_optimize_membar_local (basic_block b
is null if the membar has already been deleted.
Note that the initialization here should only be needed to
- supress warnings. */
+ suppress warnings. */
next_membar = 0;
/* USED_REGS is the set of registers that are used before the
Index: config/i386/i386.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.c,v
retrieving revision 1.843
diff -u -d -p -r1.843 i386.c
--- config/i386/i386.c 18 Jul 2005 06:39:18 -0000 1.843
+++ config/i386/i386.c 29 Jul 2005 14:48:08 -0000
@@ -17490,7 +17490,7 @@ ix86_expand_vector_extract (bool mmx_ok,
}
}
-/* Expand a vector reduction on V4SFmode for SSE1. FN is the binar
+/* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
pattern to reduce; DEST is the destination; IN is the input vector. */
void
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-07-03 16:12 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-07-03 16:12 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-07-03 Kazu Hirata <kazu@codesourcery.com>
* c-decl.c, tree-object-size.c, tree-vectorizer.c,
config/arm/unwind-arm.c, config/arm/unwind-arm.h: Fix comment
typos.
Index: c-decl.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/c-decl.c,v
retrieving revision 1.672
diff -u -d -p -r1.672 c-decl.c
--- c-decl.c 3 Jul 2005 01:14:53 -0000 1.672
+++ c-decl.c 3 Jul 2005 15:39:09 -0000
@@ -1308,7 +1308,7 @@ diagnose_mismatched_decls (tree newdecl,
if (DECL_INITIAL (olddecl))
{
/* If both decls are in the same TU and the new declaration
- isn't overridding an extern inline reject the new decl.
+ isn't overriding an extern inline reject the new decl.
When we handle c99 style inline rules we'll want to reject
the following:
Index: tree-object-size.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-object-size.c,v
retrieving revision 2.2
diff -u -d -p -r2.2 tree-object-size.c
--- tree-object-size.c 1 Jul 2005 02:10:41 -0000 2.2
+++ tree-object-size.c 3 Jul 2005 15:39:09 -0000
@@ -623,7 +623,7 @@ plus_expr_object_size (struct object_siz
/* Compute object sizes for VAR.
For ADDR_EXPR an object size is the number of remaining bytes
- to the end of the object (where what is consindered an object depends on
+ to the end of the object (where what is considered an object depends on
OSI->object_size_type).
For allocation CALL_EXPR like malloc or calloc object size is the size
of the allocation.
Index: tree-vectorizer.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-vectorizer.c,v
retrieving revision 2.100
diff -u -d -p -r2.100 tree-vectorizer.c
--- tree-vectorizer.c 29 Jun 2005 00:36:31 -0000 2.100
+++ tree-vectorizer.c 3 Jul 2005 15:39:09 -0000
@@ -1747,7 +1747,7 @@ vect_is_simple_use (tree operand, loop_v
CODE - tree_code of a reduction operations.
Output:
- REDUC_CODE - the correponding tree-code to be used to reduce the
+ REDUC_CODE - the corresponding tree-code to be used to reduce the
vector of partial results into a single scalar result (which
will also reside in a vector).
Index: config/arm/unwind-arm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/arm/unwind-arm.c,v
retrieving revision 1.3
diff -u -d -p -r1.3 unwind-arm.c
--- config/arm/unwind-arm.c 1 Jul 2005 02:10:44 -0000 1.3
+++ config/arm/unwind-arm.c 3 Jul 2005 15:39:09 -0000
@@ -28,7 +28,7 @@
#include "unwind.h"
/* Definitions for C++ runtime support routines. We make these weak
- declarations to avoid pulling in libsupc++ unneccesarily. */
+ declarations to avoid pulling in libsupc++ unnecessarily. */
typedef unsigned char bool;
typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */
@@ -119,7 +119,7 @@ typedef struct
} phase2_vrs;
-/* An exeption index table entry. */
+/* An exception index table entry. */
typedef struct __EIT_entry
{
@@ -375,7 +375,7 @@ search_EIT_table (const __EIT_entry * ta
/* Find the exception index table eintry for the given address.
Fill in the relevant fields of the UCB.
- Returns _URC_FAILURE if an error occured, _URC_OK on success*/
+ Returns _URC_FAILURE if an error occurred, _URC_OK on success. */
static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
@@ -676,7 +676,7 @@ __gnu_unwind_pr_common (_Unwind_State st
/* Cleanup in range, and we are running cleanups. */
_uw lp;
- /* Landing pad address is 31-bit pc-relatvie offset. */
+ /* Landing pad address is 31-bit pc-relative offset. */
lp = selfrel_offset31 (data);
data++;
/* Save the exception data pointer. */
@@ -705,7 +705,7 @@ __gnu_unwind_pr_common (_Unwind_State st
if (data[1] == (_uw) -2)
return _URC_FAILURE;
- /* The thrown object immediately folows the ECB. */
+ /* The thrown object immediately follows the ECB. */
matched = (void *)(ucbp + 1);
if (data[1] != (_uw) -1)
{
@@ -739,7 +739,7 @@ __gnu_unwind_pr_common (_Unwind_State st
_Unwind_SetGR (context, 0, (_uw) ucbp);
return _URC_INSTALL_CONTEXT;
}
- /* Catch handler not mached. Advance to the next descriptor. */
+ /* Catch handler not matched. Advance to the next descriptor. */
data += 2;
break;
@@ -818,7 +818,7 @@ __gnu_unwind_pr_common (_Unwind_State st
if (phase2_call_unexpected_after_unwind)
{
- /* Enter __cxa_unexpected as if called from the callsite. */
+ /* Enter __cxa_unexpected as if called from the call site. */
_Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
_Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
return _URC_INSTALL_CONTEXT;
Index: config/arm/unwind-arm.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/arm/unwind-arm.h,v
retrieving revision 1.2
diff -u -d -p -r1.2 unwind-arm.h
--- config/arm/unwind-arm.h 1 Jul 2005 02:10:44 -0000 1.2
+++ config/arm/unwind-arm.h 3 Jul 2005 15:39:09 -0000
@@ -26,8 +26,8 @@
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
-/* Language-independent unwinder header public defines. This contins both
- ABI defined objects, and GNU support routines.*/
+/* Language-independent unwinder header public defines. This contains both
+ ABI defined objects, and GNU support routines. */
#ifndef UNWIND_ARM_H
#define UNWIND_ARM_H
@@ -223,7 +223,7 @@ extern "C" {
tmp += ptr;
tmp = *(_Unwind_Word *) tmp;
#elif defined(__symbian__)
- /* Absoute pointer. Nothing more to do. */
+ /* Absolute pointer. Nothing more to do. */
#else
/* Pc-relative pointer. */
tmp += ptr;
@@ -250,7 +250,7 @@ extern "C" {
}
/* The dwarf unwinder doesn't understand arm/thumb state. We assume the
- landing pad uses the same instruction set as the callsite. */
+ landing pad uses the same instruction set as the call site. */
#define _Unwind_SetIP(context, val) \
_Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-06-12 14:03 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-06-12 14:03 UTC (permalink / raw)
To: gcc-patches
[-- Attachment #1: Type: text/plain, Size: 40 bytes --]
Hi,
Committed as obvious.
Kazu Hirata
[-- Attachment #2: typos-gcc-1.patch --]
[-- Type: text/x-patch, Size: 5955 bytes --]
2005-06-12 Kazu Hirata <kazu@codesourcery.com>
* cgraphunit.c, tree-ssa-loop-ivopts.c,
tree-ssa-structalias.c, tree-vectorizer.c, tree-vectorizer.h,
config/sparc/sparc.c: Fix comment typos.
Index: cgraphunit.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cgraphunit.c,v
retrieving revision 1.117
diff -u -d -p -r1.117 cgraphunit.c
--- cgraphunit.c 9 Jun 2005 16:21:30 -0000 1.117
+++ cgraphunit.c 12 Jun 2005 13:57:05 -0000
@@ -736,7 +736,7 @@ cgraph_varpool_assemble_pending_decls (v
if (!TREE_ASM_WRITTEN (decl) && !node->alias && !DECL_EXTERNAL (decl))
{
assemble_variable (decl, 0, 1, 0);
- /* Local static vairables are neever seen by check_global_declarations
+ /* Local static variables are never seen by check_global_declarations
so we need to output debug info by hand. */
if (decl_function_context (decl) && errorcount == 0 && sorrycount == 0)
{
Index: tree-ssa-loop-ivopts.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-loop-ivopts.c,v
retrieving revision 2.79
diff -u -d -p -r2.79 tree-ssa-loop-ivopts.c
--- tree-ssa-loop-ivopts.c 8 Jun 2005 22:47:07 -0000 2.79
+++ tree-ssa-loop-ivopts.c 12 Jun 2005 13:57:14 -0000
@@ -1492,7 +1492,7 @@ may_be_unaligned_p (tree ref)
unsigned base_align;
/* TARGET_MEM_REFs are translated directly to valid MEMs on the target,
- thus they are not missaligned. */
+ thus they are not misaligned. */
if (TREE_CODE (ref) == TARGET_MEM_REF)
return false;
Index: tree-ssa-structalias.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-structalias.c,v
retrieving revision 2.2
diff -u -d -p -r2.2 tree-ssa-structalias.c
--- tree-ssa-structalias.c 12 Jun 2005 03:19:26 -0000 2.2
+++ tree-ssa-structalias.c 12 Jun 2005 13:57:19 -0000
@@ -80,7 +80,7 @@ Foundation, Inc., 59 Temple Place, Suite
DEREF is a constraint expression type used to represent *x, whether
it appears on the LHS or the RHS of a statement.
ADDRESSOF is a constraint expression used to represent &x, whether
- it apepars on the LHS or the RHS of a statement.
+ it appears on the LHS or the RHS of a statement.
Each pointer variable in the program is assigned an integer id, and
each field of a structure variable is assigned an integer id as well.
@@ -137,8 +137,8 @@ Foundation, Inc., 59 Temple Place, Suite
causes Sol(P) <- Sol(P) union Sol(Q).
7. As we visit each node, all complex constraints associated with
- that node are processed by adding approriate copy edges to the graph, or the
- approriate variables to the solution set.
+ that node are processed by adding appropriate copy edges to the graph, or the
+ appropriate variables to the solution set.
8. The process of walking the graph is iterated until no solution
sets change.
@@ -245,7 +245,7 @@ DEF_VEC_P(varinfo_t);
DEF_VEC_ALLOC_P(varinfo_t, gc);
-/* Table of variable info structures for constraint variables. Indexed direcly
+/* Table of variable info structures for constraint variables. Indexed directly
by variable info id. */
static VEC(varinfo_t,gc) *varmap;
#define get_varinfo(n) VEC_index(varinfo_t, varmap, n)
@@ -1582,7 +1582,7 @@ perform_var_substitution (constraint_gra
/* Theorem 4 in Rountev and Chandra: If i is a direct node,
then Solution(i) is a subset of Solution (w), where w is a
predecessor in the graph.
- Corrolary: If all predecessors of i have the same
+ Corollary: If all predecessors of i have the same
points-to set, then i has that same points-to set as
those predecessors. */
tmp = BITMAP_ALLOC (NULL);
@@ -3052,7 +3052,7 @@ init_base_vars (void)
/* readonly memory points to anything, in order to make deref
easier. In reality, it points to anything the particular
readonly variable can point to, but we don't track this
- seperately. */
+ separately. */
lhs.type = SCALAR;
lhs.var = readonly_id;
lhs.offset = 0;
Index: tree-vectorizer.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-vectorizer.c,v
retrieving revision 2.95
diff -u -d -p -r2.95 tree-vectorizer.c
--- tree-vectorizer.c 10 Jun 2005 14:51:44 -0000 2.95
+++ tree-vectorizer.c 12 Jun 2005 13:57:22 -0000
@@ -1748,7 +1748,7 @@ vect_is_simple_use (tree operand, loop_v
TODO:
Detect a cross-iteration def-use cucle that represents a simple
- reduction computation. We look for the followng pattern:
+ reduction computation. We look for the following pattern:
loop_header:
a1 = phi < a0, a2 >
Index: tree-vectorizer.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-vectorizer.h,v
retrieving revision 2.22
diff -u -d -p -r2.22 tree-vectorizer.h
--- tree-vectorizer.h 10 Jun 2005 14:51:47 -0000 2.22
+++ tree-vectorizer.h 12 Jun 2005 13:57:23 -0000
@@ -56,7 +56,7 @@ enum dr_alignment_support {
dr_aligned
};
-/* Define type of def-use cross-iteraiton cycle. */
+/* Define type of def-use cross-iteration cycle. */
enum vect_def_type {
vect_constant_def,
vect_invariant_def,
Index: config/sparc/sparc.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/sparc/sparc.c,v
retrieving revision 1.377
diff -u -d -p -r1.377 sparc.c
--- config/sparc/sparc.c 8 Jun 2005 16:03:14 -0000 1.377
+++ config/sparc/sparc.c 12 Jun 2005 13:57:38 -0000
@@ -7978,7 +7978,7 @@ sparc_handle_vis_mul8x16 (int fncode, tr
}
/* Handle TARGET_FOLD_BUILTIN target hook.
- Fold builtin functions for SPARC intrinsics. If INGNORE is true the
+ Fold builtin functions for SPARC intrinsics. If IGNORE is true the
result of the function call is ignored. NULL_TREE is returned if the
function could not be folded. */
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-06-03 13:42 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-06-03 13:42 UTC (permalink / raw)
To: gcc-patches
[-- Attachment #1: Type: text/plain, Size: 40 bytes --]
Hi,
Committed as obvious.
Kazu Hirata
[-- Attachment #2: typos-gcc-1.patch --]
[-- Type: text/x-patch, Size: 4246 bytes --]
To: gcc-patches@gcc.gnu.org
Subject: [patch] gcc/*: Fix comment typos.
Hi,
Committed as obvious.
Kazu Hirata
2005-06-03 Kazu Hirata <kazu@codesourcery.com>
* cgraph.c, cgraphunit.c, config/mips/mips.c: Fix comment
typos.
Index: cgraph.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cgraph.c,v
retrieving revision 1.76
diff -u -d -p -r1.76 cgraph.c
--- cgraph.c 2 Jun 2005 19:41:31 -0000 1.76
+++ cgraph.c 3 Jun 2005 13:36:32 -0000
@@ -860,7 +860,7 @@ cgraph_varpool_finalize_decl (tree decl)
if (decide_is_variable_needed (node, decl))
cgraph_varpool_mark_needed_node (node);
- /* Since we reclaim unrechable nodes at the end of every language
+ /* Since we reclaim unreachable nodes at the end of every language
level unit, we need to be conservative about possible entry points
there. */
if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl))
@@ -987,12 +987,12 @@ cgraph_function_body_availability (struc
care at least of two notable extensions - the COMDAT functions
used to share template instantiations in C++ (this is symmetric
to code cp_cannot_inline_tree_fn and probably shall be shared and
- the inlinability hooks completelly elliminated).
+ the inlinability hooks completely eliminated).
??? Does the C++ one definition rule allow us to always return
AVAIL_AVAILABLE here? That would be good reason to preserve this
hook Similarly deal with extern inline functions - this is again
- neccesary to get C++ shared functions having keyed templates
+ necessary to get C++ shared functions having keyed templates
right and in the C extension documentation we probably should
document the requirement of both versions of function (extern
inline and offline) having same side effect characteristics as
@@ -1016,7 +1016,7 @@ cgraph_variable_initializer_availability
return AVAIL_NOT_AVAILABLE;
if (!TREE_PUBLIC (node->decl))
return AVAIL_AVAILABLE;
- /* If the variable can be overwritted, return OVERWRITABLE. Takes
+ /* If the variable can be overwritten, return OVERWRITABLE. Takes
care of at least two notable extensions - the COMDAT variables
used to share template instantiations in C++. */
if (!(*targetm.binds_local_p) (node->decl) && !DECL_COMDAT (node->decl))
Index: cgraphunit.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cgraphunit.c,v
retrieving revision 1.114
diff -u -d -p -r1.114 cgraphunit.c
--- cgraphunit.c 2 Jun 2005 20:33:04 -0000 1.114
+++ cgraphunit.c 3 Jun 2005 13:36:32 -0000
@@ -421,7 +421,7 @@ cgraph_finalize_function (tree decl, boo
if (decide_is_function_needed (node, decl))
cgraph_mark_needed_node (node);
- /* Since we reclaim unrechable nodes at the end of every language
+ /* Since we reclaim unreachable nodes at the end of every language
level unit, we need to be conservative about possible entry points
there. */
if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl))
Index: config/mips/mips.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/mips/mips.c,v
retrieving revision 1.508
diff -u -d -p -r1.508 mips.c
--- config/mips/mips.c 2 Jun 2005 18:08:15 -0000 1.508
+++ config/mips/mips.c 3 Jun 2005 13:36:34 -0000
@@ -713,7 +713,7 @@ const struct mips_cpu_info mips_cpu_info
};
/* Default costs. If these are used for a processor we should look
- up the acutal costs. */
+ up the actual costs. */
#define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
COSTS_N_INSNS (7), /* fp_mult_sf */ \
COSTS_N_INSNS (8), /* fp_mult_df */ \
@@ -2361,7 +2361,8 @@ mips_rtx_costs (rtx x, int code, int out
Given the choice between "li R1,0...255" and "move R1,R2"
(where R2 is a known constant), it is usually better to use "li",
- since we do not want to unnessarily extend the lifetime of R2. */
+ since we do not want to unnecessarily extend the lifetime
+ of R2. */
if (outer_code == SET
&& INTVAL (x) >= 0
&& INTVAL (x) < 256)
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-05-07 14:46 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-05-07 14:46 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-05-07 Kazu Hirata <kazu@cs.umass.edu>
* tree-ssa-loop-ivcanon.c, config/i386/i386.c,
config/rs6000/rs6000.h: Fix comment typos.
Index: tree-ssa-loop-ivcanon.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-loop-ivcanon.c,v
retrieving revision 2.12
diff -u -d -p -r2.12 tree-ssa-loop-ivcanon.c
--- tree-ssa-loop-ivcanon.c 6 May 2005 21:11:29 -0000 2.12
+++ tree-ssa-loop-ivcanon.c 7 May 2005 14:37:27 -0000
@@ -59,7 +59,7 @@ Software Foundation, 59 Temple Place - S
enum unroll_level
{
- UL_SINGLE_ITER, /* Only loops that exit immediatelly in the first
+ UL_SINGLE_ITER, /* Only loops that exit immediately in the first
iteration. */
UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase
of code size. */
Index: config/i386/i386.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.c,v
retrieving revision 1.819
diff -u -d -p -r1.819 i386.c
--- config/i386/i386.c 6 May 2005 19:31:13 -0000 1.819
+++ config/i386/i386.c 7 May 2005 14:37:29 -0000
@@ -11291,7 +11291,7 @@ ix86_expand_movmem (rtx dst, rtx src, rt
count / 4 + (count & 3), the other sequence is either 4 or 7 bytes,
but we don't know whether upper 24 (resp. 56) bits of %ecx will be
known to be zero or not. The rep; movsb sequence causes higher
- register preasure though, so take that into account. */
+ register pressure though, so take that into account. */
if ((!optimize || optimize_size)
&& (count == 0
Index: config/rs6000/rs6000.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/rs6000.h,v
retrieving revision 1.363
diff -u -d -p -r1.363 rs6000.h
--- config/rs6000/rs6000.h 5 May 2005 20:54:25 -0000 1.363
+++ config/rs6000/rs6000.h 7 May 2005 14:37:30 -0000
@@ -128,7 +128,7 @@
/* Architecture type. */
-/* Define TARGET_MFCRF if the target assembler does not suppport the
+/* Define TARGET_MFCRF if the target assembler does not support the
optional field operand for mfcr. */
#ifndef HAVE_AS_MFCRF
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-04-19 22:27 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-04-19 22:27 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-04-19 Kazu Hirata <kazu@cs.umass.edu>
* tree-ssa-phiopt.c, config/arm/arm.c, config/fr30/fr30.md,
config/mcore/mcore.c: Fix comment typos.
Index: tree-ssa-phiopt.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-phiopt.c,v
retrieving revision 2.31
diff -u -d -p -r2.31 tree-ssa-phiopt.c
--- tree-ssa-phiopt.c 9 Apr 2005 01:37:24 -0000 2.31
+++ tree-ssa-phiopt.c 19 Apr 2005 22:02:09 -0000
@@ -301,7 +301,7 @@ empty_block_p (basic_block bb)
return true;
}
-/* Replace PHI node element whoes edge is E in block BB with variable NEW.
+/* Replace PHI node element whose edge is E in block BB with variable NEW.
Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
is known to have two edges, one of which must reach BB). */
Index: config/arm/arm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/arm/arm.c,v
retrieving revision 1.442
diff -u -d -p -r1.442 arm.c
--- config/arm/arm.c 12 Apr 2005 06:33:25 -0000 1.442
+++ config/arm/arm.c 19 Apr 2005 22:02:12 -0000
@@ -12687,7 +12687,7 @@ is_called_in_ARM_mode (tree func)
if (TREE_CODE (func) != FUNCTION_DECL)
abort ();
- /* Ignore the problem about functions whoes address is taken. */
+ /* Ignore the problem about functions whose address is taken. */
if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
return TRUE;
Index: config/fr30/fr30.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/fr30/fr30.md,v
retrieving revision 1.26
diff -u -d -p -r1.26 fr30.md
--- config/fr30/fr30.md 4 Apr 2005 13:45:51 -0000 1.26
+++ config/fr30/fr30.md 19 Apr 2005 22:02:12 -0000
@@ -38,8 +38,8 @@
;; An instruction by default is considered to be 'delyabable'
;; that is, it can be placed into a delay slot, but it is not
;; itself a delayed branch type instruction. An instruction
-;; whoes type is 'delayed' is one which has a delay slot, and
-;; an instruction whoes delay_type is 'other' is one which does
+;; whose type is 'delayed' is one which has a delay slot, and
+;; an instruction whose delay_type is 'other' is one which does
;; not have a delay slot, nor can it be placed into a delay slot.
(define_attr "delay_type" "delayable,delayed,other" (const_string "delayable"))
@@ -316,7 +316,7 @@
)
;; If we are loading a large positive constant, one which has bits
-;; in the top byte set, but whoes set bits all lie within an 8 bit
+;; in the top byte set, but whose set bits all lie within an 8 bit
;; range, then we can save time and space by loading the byte value
;; and shifting it into place.
(define_split
Index: config/mcore/mcore.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/mcore/mcore.c,v
retrieving revision 1.78
diff -u -d -p -r1.78 mcore.c
--- config/mcore/mcore.c 4 Apr 2005 15:17:09 -0000 1.78
+++ config/mcore/mcore.c 19 Apr 2005 22:02:13 -0000
@@ -2705,7 +2705,7 @@ handle_structs_in_regs (enum machine_mod
{
int size;
- /* The MCore ABI defines that a structure whoes size is not a whole multiple
+ /* The MCore ABI defines that a structure whose size is not a whole multiple
of bytes is passed packed into registers (or spilled onto the stack if
not enough registers are available) with the last few bytes of the
structure being packed, left-justified, into the last register/stack slot.
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-04-13 14:35 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-04-13 14:35 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-04-13 Kazu Hirata <kazu@cs.umass.edu>
* basic-block.h, tree-ssa-uncprop.c, varasm.c,
config/i386/sse.md: Fix comment typos.
Index: basic-block.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/basic-block.h,v
retrieving revision 1.248
diff -u -d -p -r1.248 basic-block.h
--- basic-block.h 12 Apr 2005 21:33:46 -0000 1.248
+++ basic-block.h 13 Apr 2005 14:24:02 -0000
@@ -396,7 +396,7 @@ struct control_flow_graph GTY(())
#define BASIC_BLOCK_FOR_FUNCTION(FN,N) \
(VARRAY_BB (basic_block_info_for_function(FN), (N)))
-/* Defines for texual backward source compatibility. */
+/* Defines for textual backward source compatibility. */
#define ENTRY_BLOCK_PTR (cfun->cfg->x_entry_block_ptr)
#define EXIT_BLOCK_PTR (cfun->cfg->x_exit_block_ptr)
#define basic_block_info (cfun->cfg->x_basic_block_info)
Index: tree-ssa-uncprop.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-uncprop.c,v
retrieving revision 2.1
diff -u -d -p -r2.1 tree-ssa-uncprop.c
--- tree-ssa-uncprop.c 13 Apr 2005 04:29:40 -0000 2.1
+++ tree-ssa-uncprop.c 13 Apr 2005 14:24:02 -0000
@@ -258,7 +258,7 @@ associate_equivalences_with_edges (void)
COND_EXPRs and SWITCH_EXPRs.
We want to do those propagations as they can sometimes allow
- the SSA optimziers to do a better job. However, in the cases
+ the SSA optimizers to do a better job. However, in the cases
where such propagations do not result in further optimization,
we would like to "undo" the propagation to avoid the redundant
copies and constant initializations.
@@ -507,7 +507,7 @@ uncprop_into_successor_phis (struct dom_
/* Walk every equivalence with the same value. If we find
one with the same underlying variable as the PHI result,
then replace the value in the argument with its equivalent
- SSA_NAME. Use the most recent equivlance as hopefully
+ SSA_NAME. Use the most recent equivalence as hopefully
that results in shortest lifetimes. */
for (j = VARRAY_ACTIVE_SIZE (elt->equivalences) - 1; j >= 0; j--)
{
Index: varasm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/varasm.c,v
retrieving revision 1.497
diff -u -d -p -r1.497 varasm.c
--- varasm.c 12 Apr 2005 22:38:06 -0000 1.497
+++ varasm.c 13 Apr 2005 14:24:03 -0000
@@ -129,8 +129,8 @@ char *hot_section_end_label;
char *cold_section_end_label;
-/* The following global variable indicates the seciton name to be used
- for the current cold section, when partitiong hot and cold basic
+/* The following global variable indicates the section name to be used
+ for the current cold section, when partitioning hot and cold basic
blocks into separate sections. */
char *unlikely_text_section_name;
Index: config/i386/sse.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/sse.md,v
retrieving revision 1.8
diff -u -d -p -r1.8 sse.md
--- config/i386/sse.md 13 Apr 2005 04:59:33 -0000 1.8
+++ config/i386/sse.md 13 Apr 2005 14:24:03 -0000
@@ -774,7 +774,7 @@
(set_attr "mode" "V4SF")])
;; Also define scalar versions. These are used for abs, neg, and
-;; conditional move. Using subregs into vector modes causes regiser
+;; conditional move. Using subregs into vector modes causes register
;; allocation lossage. These patterns do not allow memory operands
;; because the native instructions read the full 128-bits.
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-04-09 16:43 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-04-09 16:43 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-04-09 Kazu Hirata <kazu@cs.umass.edu>
* dominance.c, gthr-win32.h, reg-stack.c, tree-ssa-copy.c,
tree-ssa-operands.c, tree-ssa.c, tree-vrp.c, varasm.c,
config/alpha/alpha.c, config/arm/arm.c, config/m32r/m32r.h,
config/rs6000/predicates.md: Fix comment typos.
Index: dominance.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/dominance.c,v
retrieving revision 1.37
diff -u -d -p -r1.37 dominance.c
--- dominance.c 9 Apr 2005 01:37:22 -0000 1.37
+++ dominance.c 9 Apr 2005 16:39:06 -0000
@@ -30,7 +30,7 @@
The algorithm computes this dominator tree implicitly by computing for
each block its immediate dominator. We use tree balancing and path
- compression, so its the O(e*a(e,v)) variant, where a(e,v) is the very
+ compression, so it's the O(e*a(e,v)) variant, where a(e,v) is the very
slowly growing functional inverse of the Ackerman function. */
#include "config.h"
Index: gthr-win32.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/gthr-win32.h,v
retrieving revision 1.26
diff -u -d -p -r1.26 gthr-win32.h
--- gthr-win32.h 25 Sep 2004 14:36:37 -0000 1.26
+++ gthr-win32.h 9 Apr 2005 16:39:06 -0000
@@ -71,7 +71,7 @@ Software Foundation, 59 Temple Place - S
#ifdef _LIBOBJC
/* This is necessary to prevent windef.h (included from windows.h) from
- defining it's own BOOL as a typedef. */
+ defining its own BOOL as a typedef. */
#ifndef __OBJC__
#define __OBJC__
#endif
Index: reg-stack.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/reg-stack.c,v
retrieving revision 1.175
diff -u -d -p -r1.175 reg-stack.c
--- reg-stack.c 31 Mar 2005 14:59:52 -0000 1.175
+++ reg-stack.c 9 Apr 2005 16:39:09 -0000
@@ -1424,7 +1424,7 @@ subst_stack_regs_pat (rtx insn, stack re
if (pat != PATTERN (insn))
{
/* The fix_truncdi_1 pattern wants to be able to allocate
- it's own scratch register. It does this by clobbering
+ its own scratch register. It does this by clobbering
an fp reg so that it is assured of an empty reg-stack
register. If the register is live, kill it now.
Remove the DEAD/UNUSED note so we don't try to kill it
Index: tree-ssa-copy.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-copy.c,v
retrieving revision 2.24
diff -u -d -p -r2.24 tree-ssa-copy.c
--- tree-ssa-copy.c 9 Apr 2005 01:37:23 -0000 2.24
+++ tree-ssa-copy.c 9 Apr 2005 16:39:09 -0000
@@ -430,7 +430,7 @@ get_last_copy_of (tree var)
/* Set FIRST to be the first variable in the copy-of chain for DEST.
- If DEST's copy-of value or its copy-of chain have changed, return
+ If DEST's copy-of value or its copy-of chain has changed, return
true.
MEM_REF is the memory reference where FIRST is stored. This is
Index: tree-ssa-operands.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-operands.c,v
retrieving revision 2.75
diff -u -d -p -r2.75 tree-ssa-operands.c
--- tree-ssa-operands.c 9 Apr 2005 01:37:24 -0000 2.75
+++ tree-ssa-operands.c 9 Apr 2005 16:39:12 -0000
@@ -67,7 +67,7 @@ Boston, MA 02111-1307, USA. */
on each of the 5 operand vectors which have been built up.
If the stmt had a previous operand cache, the finalization routines
- attempt to match up the new operands with the old ones. If its a perfect
+ attempt to match up the new operands with the old ones. If it's a perfect
match, the old vector is simply reused. If it isn't a perfect match, then
a new vector is created and the new operands are placed there. For
virtual operands, if the previous cache had SSA_NAME version of a
@@ -473,7 +473,7 @@ correct_use_link (ssa_imm_use_t *ptr, tr
if (prev)
{
bool stmt_mod = true;
- /* Find the first element which isn't a SAFE iterator, is in a sifferent
+ /* Find the first element which isn't a SAFE iterator, is in a different
stmt, and is not a a modified stmt, That node is in the correct list,
see if we are too. */
@@ -493,7 +493,7 @@ correct_use_link (ssa_imm_use_t *ptr, tr
root = prev->stmt;
else
root = *(prev->use);
- /* If its the right list, simply return. */
+ /* If it's the right list, simply return. */
if (root == *(ptr->use))
return;
}
Index: tree-ssa.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa.c,v
retrieving revision 2.86
diff -u -d -p -r2.86 tree-ssa.c
--- tree-ssa.c 9 Apr 2005 01:37:24 -0000 2.86
+++ tree-ssa.c 9 Apr 2005 16:39:13 -0000
@@ -261,7 +261,7 @@ verify_use (basic_block bb, basic_block
}
/* Make sure the use is in an appropriate list by checking the previous
- element to make sure its the same. */
+ element to make sure it's the same. */
if (use_p->prev == NULL)
{
error ("No immediate_use list");
Index: tree-vrp.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-vrp.c,v
retrieving revision 2.1
diff -u -d -p -r2.1 tree-vrp.c
--- tree-vrp.c 9 Apr 2005 01:37:28 -0000 2.1
+++ tree-vrp.c 9 Apr 2005 16:39:15 -0000
@@ -2000,7 +2000,7 @@ vrp_meet (value_range *vr0, value_range
if (compare_values (vr0->min, vr1->min) == 1)
min = vr1->min;
- /* The upper limit of the new range is the maximium of the
+ /* The upper limit of the new range is the maximum of the
two ranges. */
if (compare_values (vr0->max, vr1->max) == -1)
max = vr1->max;
Index: varasm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/varasm.c,v
retrieving revision 1.493
diff -u -d -p -r1.493 varasm.c
--- varasm.c 1 Apr 2005 14:36:37 -0000 1.493
+++ varasm.c 9 Apr 2005 16:39:20 -0000
@@ -5402,7 +5402,7 @@ default_valid_pointer_mode (enum machine
}
/* Default function to output code that will globalize a label. A
- target must define GLOBAL_ASM_OP or provide it's own function to
+ target must define GLOBAL_ASM_OP or provide its own function to
globalize a label. */
#ifdef GLOBAL_ASM_OP
void
Index: config/alpha/alpha.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/alpha/alpha.c,v
retrieving revision 1.413
diff -u -d -p -r1.413 alpha.c
--- config/alpha/alpha.c 30 Mar 2005 18:53:11 -0000 1.413
+++ config/alpha/alpha.c 9 Apr 2005 16:39:29 -0000
@@ -8418,7 +8418,7 @@ alphaev4_next_group (rtx insn, int *pin_
if (in_use)
goto done;
- /* If this is a completely unrecognized insn, its an asm.
+ /* If this is a completely unrecognized insn, it's an asm.
We don't know how long it is, so record length as -1 to
signal a needed realignment. */
if (recog_memoized (insn) < 0)
@@ -8516,7 +8516,7 @@ alphaev5_next_group (rtx insn, int *pin_
if (in_use)
goto done;
- /* If this is a completely unrecognized insn, its an asm.
+ /* If this is a completely unrecognized insn, it's an asm.
We don't know how long it is, so record length as -1 to
signal a needed realignment. */
if (recog_memoized (insn) < 0)
Index: config/arm/arm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/arm/arm.c,v
retrieving revision 1.439
diff -u -d -p -r1.439 arm.c
--- config/arm/arm.c 9 Apr 2005 12:03:51 -0000 1.439
+++ config/arm/arm.c 9 Apr 2005 16:39:41 -0000
@@ -7420,7 +7420,7 @@ arm_const_double_inline_cost (rtx val)
NULL_RTX, NULL_RTX, 0, 0));
}
-/* Return true if it is worthwile to split a 64-bit constant into two
+/* Return true if it is worthwhile to split a 64-bit constant into two
32-bit operations. This is the case if optimizing for size, or
if we have load delay slots, or if one 32-bit part can be done with
a single data operation. */
Index: config/m32r/m32r.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/m32r/m32r.h,v
retrieving revision 1.127
diff -u -d -p -r1.127 m32r.h
--- config/m32r/m32r.h 29 Mar 2005 17:32:05 -0000 1.127
+++ config/m32r/m32r.h 9 Apr 2005 16:39:43 -0000
@@ -1609,7 +1609,7 @@ extern char m32r_punct_chars[256];
After generation of rtl, the compiler makes no further distinction
between pointers and any other objects of this machine mode. */
/* ??? The M32R doesn't have full 32 bit pointers, but making this PSImode has
- it's own problems (you have to add extendpsisi2 and truncsipsi2).
+ its own problems (you have to add extendpsisi2 and truncsipsi2).
Try to avoid it. */
#define Pmode SImode
Index: config/rs6000/predicates.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/predicates.md,v
retrieving revision 1.12
diff -u -d -p -r1.12 predicates.md
--- config/rs6000/predicates.md 9 Apr 2005 15:39:34 -0000 1.12
+++ config/rs6000/predicates.md 9 Apr 2005 16:39:44 -0000
@@ -339,7 +339,7 @@
;; Return 1 if the operand is in volatile memory. Note that during the
;; RTL generation phase, memory_operand does not return TRUE for volatile
;; memory references. So this function allows us to recognize volatile
-;; references where its safe.
+;; references where it's safe.
(define_predicate "volatile_mem_operand"
(and (and (match_code "mem")
(match_test "MEM_VOLATILE_P (op)"))
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-04-06 17:06 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-04-06 17:06 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-04-06 Kazu Hirata <kazu@cs.umass.edu>
* cse.c, tree-flow-inline.h, tree-flow.h, tree-ssa-operands.c,
tree-ssa-sink.c, tree.h, config/bfin/bfin.c,
config/bfin/bfin.h: Fix comment typos.
Index: cse.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cse.c,v
retrieving revision 1.352
diff -u -d -p -r1.352 cse.c
--- cse.c 5 Apr 2005 14:50:32 -0000 1.352
+++ cse.c 6 Apr 2005 16:56:05 -0000
@@ -3808,7 +3808,7 @@ fold_rtx (rtx x, rtx insn)
/* It's not safe to substitute the operand of a conversion
operator with a constant, as the conversion's identity
- depends upon the mode of it's operand. This optimization
+ depends upon the mode of its operand. This optimization
is handled by the call to simplify_unary_operation. */
if (GET_RTX_CLASS (code) == RTX_UNARY
&& GET_MODE (replacements[j]) != mode_arg0
Index: tree-flow-inline.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-flow-inline.h,v
retrieving revision 2.35
diff -u -d -p -r2.35 tree-flow-inline.h
--- tree-flow-inline.h 5 Apr 2005 19:05:00 -0000 2.35
+++ tree-flow-inline.h 6 Apr 2005 16:56:05 -0000
@@ -347,12 +347,12 @@ next_safe_imm_use (imm_use_iterator *imm
use_operand_p old;
old = imm->imm_use;
- /* If the next node following the iter_node is still the one refered to by
- imm_use, then the list hasnt changed, go to the next node. */
+ /* If the next node following the iter_node is still the one referred to by
+ imm_use, then the list hasn't changed, go to the next node. */
if (imm->iter_node.next == imm->imm_use)
{
ptr = &(imm->iter_node);
- /* Remove iternode fromn the list. */
+ /* Remove iternode from the list. */
delink_imm_use (ptr);
imm->imm_use = imm->imm_use->next;
if (! end_safe_imm_use_p (imm))
@@ -369,7 +369,7 @@ next_safe_imm_use (imm_use_iterator *imm
else
{
/* If the 'next' value after the iterator isn't the same as it was, then
- a node has been deleted, so we sinply proceed to the node following
+ a node has been deleted, so we simply proceed to the node following
where the iterator is in the list. */
imm->imm_use = imm->iter_node.next;
if (end_safe_imm_use_p (imm))
@@ -382,7 +382,7 @@ next_safe_imm_use (imm_use_iterator *imm
return imm->imm_use;
}
-/* Return true is IMM has reached the end of the immeidate use list. */
+/* Return true is IMM has reached the end of the immediate use list. */
static inline bool
end_readonly_imm_use_p (imm_use_iterator *imm)
{
@@ -447,7 +447,7 @@ has_single_use (tree var)
}
/* If VAR has only a single immediate use, return true, and set USE_P and STMT
- to the use pointer and stmt of occurence. */
+ to the use pointer and stmt of occurrence. */
static inline bool
single_imm_use (tree var, use_operand_p *use_p, tree *stmt)
{
@@ -684,7 +684,7 @@ phi_arg_index_from_use (use_operand_p us
int index;
tree phi;
- /* Since the use is the first thing in a PHI arguemnt element, we can
+ /* Since the use is the first thing in a PHI argument element, we can
calculate its index based on casting it to an argument, and performing
pointer arithmetic. */
@@ -697,7 +697,7 @@ phi_arg_index_from_use (use_operand_p us
#ifdef ENABLE_CHECKING
/* Make sure the calculation doesn't have any leftover bytes. If it does,
- then imm_use is liekly not the first element in phi_arg_d. */
+ then imm_use is likely not the first element in phi_arg_d. */
gcc_assert (
(((char *)element - (char *)root) % sizeof (struct phi_arg_d)) == 0);
gcc_assert (index >= 0 && index < PHI_ARG_CAPACITY (phi));
Index: tree-flow.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-flow.h,v
retrieving revision 2.90
diff -u -d -p -r2.90 tree-flow.h
--- tree-flow.h 5 Apr 2005 23:52:41 -0000 2.90
+++ tree-flow.h 6 Apr 2005 16:56:06 -0000
@@ -243,7 +243,7 @@ typedef struct immediate_use_iterator_d
} imm_use_iterator;
-/* Use this iterator when simply looking at stmts. Adding, deleteing or
+/* Use this iterator when simply looking at stmts. Adding, deleting or
modifying stmts will cause this iterator to malfunction. */
#define FOR_EACH_IMM_USE_FAST(DEST, ITER, SSAVAR) \
Index: tree-ssa-operands.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-operands.c,v
retrieving revision 2.72
diff -u -d -p -r2.72 tree-ssa-operands.c
--- tree-ssa-operands.c 5 Apr 2005 22:23:11 -0000 2.72
+++ tree-ssa-operands.c 6 Apr 2005 16:56:07 -0000
@@ -993,7 +993,7 @@ append_v_must_def (tree var)
/* Parse STMT looking for operands. OLD_OPS is the original stmt operand
- cache for STMT, if it exested before. When fniished, the various build_*
+ cache for STMT, if it existed before. When finished, the various build_*
operand vectors will have potential operands. in them. */
static void
Index: tree-ssa-sink.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-sink.c,v
retrieving revision 1.3
diff -u -d -p -r1.3 tree-ssa-sink.c
--- tree-ssa-sink.c 5 Apr 2005 19:05:14 -0000 1.3
+++ tree-ssa-sink.c 6 Apr 2005 16:56:08 -0000
@@ -79,7 +79,7 @@ static struct
} sink_stats;
-/* Given a PHI, and one of it's arguments (DEF), find the edge for
+/* Given a PHI, and one of its arguments (DEF), find the edge for
that argument and return it. If the argument occurs twice in the PHI node,
we return NULL. */
Index: tree.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree.h,v
retrieving revision 1.710
diff -u -d -p -r1.710 tree.h
--- tree.h 5 Apr 2005 19:05:20 -0000 1.710
+++ tree.h 6 Apr 2005 16:56:11 -0000
@@ -1350,7 +1350,7 @@ struct ptr_info_def;
-/* Immediate use linking structure. THis structure is used for maintaining
+/* Immediate use linking structure. This structure is used for maintaining
a doubly linked list of uses of an SSA_NAME. */
typedef struct ssa_imm_use_d GTY(())
{
@@ -1396,7 +1396,7 @@ struct tree_ssa_name GTY(())
you wish to access the use or def fields of a PHI_NODE in the SSA
optimizers, use the accessor macros found in tree-ssa-operands.h.
These two macros are to be used only by those accessor macros, and other
- select places where we *absolutly* must take the address of the tree. */
+ select places where we *absolutely* must take the address of the tree. */
#define PHI_RESULT_TREE(NODE) PHI_NODE_CHECK (NODE)->phi.result
#define PHI_ARG_DEF_TREE(NODE, I) PHI_NODE_ELT_CHECK (NODE, I).def
Index: config/bfin/bfin.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/bfin/bfin.c,v
retrieving revision 1.1
diff -u -d -p -r1.1 bfin.c
--- config/bfin/bfin.c 5 Apr 2005 11:26:48 -0000 1.1
+++ config/bfin/bfin.c 6 Apr 2005 16:56:12 -0000
@@ -1,4 +1,4 @@
-/* The Blackfin code generation auxilary output file.
+/* The Blackfin code generation auxiliary output file.
Copyright (C) 2005 Free Software Foundation, Inc.
Contributed by Analog Devices.
@@ -320,7 +320,7 @@ setup_incoming_varargs (CUMULATIVE_ARGS
/* The move for named arguments will be generated automatically by the
compiler. We need to generate the move rtx for the unnamed arguments
- if they are in the first 3 words. We assume atleast 1 named argument
+ if they are in the first 3 words. We assume at least 1 named argument
exists, so we never generate [ARGP] = R0 here. */
for (i = cum->words + 1; i < max_arg_registers; i++)
Index: config/bfin/bfin.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/bfin/bfin.h,v
retrieving revision 1.1
diff -u -d -p -r1.1 bfin.h
--- config/bfin/bfin.h 5 Apr 2005 11:26:48 -0000 1.1
+++ config/bfin/bfin.h 6 Apr 2005 16:56:13 -0000
@@ -345,7 +345,7 @@ enum reg_class
BREGS,
LREGS,
MREGS,
- CIRCREGS, /* Circular buffering registers, Ix, Bx, Lx together form. See Automatic Circlur Buffering */
+ CIRCREGS, /* Circular buffering registers, Ix, Bx, Lx together form. See Automatic Circular Buffering. */
DAGREGS,
EVEN_AREGS,
ODD_AREGS,
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-04-01 3:43 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-04-01 3:43 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-04-01 Kazu Hirata <kazu@cs.umass.edu>
* cgraphunit.c, dbxout.c, flow.c, gcse.c, gimplify.c,
lambda-code.c, loop.c, machmode.def, mips-tfile.c,
modulo-sched.c, passes.c, postreload-gcse.c, tree-eh.c,
tree-ssa-ccp.c, varasm.c, config/frv/frv.c, config/frv/frv.h,
config/frv/frv.md, config/i386/i386.c, config/i386/i386.h,
config/i386/i386.md, config/rs6000/predicates.md,
config/rs6000/rs6000.c, config/s390/fixdfdi.h,
config/s390/s390.c, config/stormy16/stormy16.c,
config/stormy16/stormy16.md, config/vax/vax.md: Fix comment
typos.
Index: cgraphunit.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cgraphunit.c,v
retrieving revision 1.98
diff -u -d -p -r1.98 cgraphunit.c
--- cgraphunit.c 30 Mar 2005 22:27:42 -0000 1.98
+++ cgraphunit.c 1 Apr 2005 03:34:25 -0000
@@ -282,8 +282,8 @@ decide_is_function_needed (struct cgraph
return false;
}
-/* Walk the decls we marked as neccesary and see if they reference new variables
- or functions and add them into the worklists. */
+/* Walk the decls we marked as necessary and see if they reference new
+ variables or functions and add them into the worklists. */
static bool
cgraph_varpool_analyze_pending_decls (void)
{
@@ -307,10 +307,10 @@ cgraph_varpool_analyze_pending_decls (vo
}
/* Optimization of function bodies might've rendered some variables as
- unnecesary so we want to avoid these from being compiled.
+ unnecessary so we want to avoid these from being compiled.
This is done by prunning the queue and keeping only the variables that
- really appear needed (ie thery are either externally visible or referenced
+ really appear needed (ie they are either externally visible or referenced
by compiled function). Re-doing the reachability analysis on variables
brings back the remaining variables referenced by these. */
static void
@@ -772,7 +772,7 @@ cgraph_finalize_compilation_unit (void)
{
struct cgraph_node *node;
/* Keep track of already processed nodes when called multiple times for
- intermodule optmization. */
+ intermodule optimization. */
static struct cgraph_node *first_analyzed;
finish_aliases_1 ();
Index: dbxout.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/dbxout.c,v
retrieving revision 1.224
diff -u -d -p -r1.224 dbxout.c
--- dbxout.c 31 Mar 2005 14:59:48 -0000 1.224
+++ dbxout.c 1 Apr 2005 03:34:29 -0000
@@ -2360,7 +2360,7 @@ dbxout_symbol (tree decl, int local ATTR
DBXOUT_DECR_NESTING_AND_RETURN (0);
/* If we are to generate only the symbols actually used then such
- symbol nodees are flagged with TREE_USED. Ignore any that
+ symbol nodes are flagged with TREE_USED. Ignore any that
aren't flaged as TREE_USED. */
if (flag_debug_only_used_symbols
Index: flow.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/flow.c,v
retrieving revision 1.622
diff -u -d -p -r1.622 flow.c
--- flow.c 11 Mar 2005 09:04:52 -0000 1.622
+++ flow.c 1 Apr 2005 03:34:33 -0000
@@ -273,7 +273,7 @@ static int ndead;
(remember, we are walking backward). This can be computed as current
pbi->insn_num - reg_deaths[regno].
At the end of processing each basic block, the remaining live registers
- are inspected and liferanges are increased same way so liverange of global
+ are inspected and live ranges are increased same way so liverange of global
registers are computed correctly.
The array is maintained clear for dead registers, so it can be safely reused
Index: gcse.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/gcse.c,v
retrieving revision 1.337
diff -u -d -p -r1.337 gcse.c
--- gcse.c 11 Mar 2005 09:04:58 -0000 1.337
+++ gcse.c 1 Apr 2005 03:34:39 -0000
@@ -3260,7 +3260,7 @@ cprop (int alter_jumps)
settle for the condition variable in the jump instruction being integral.
We prefer to be able to record the value of a user variable, rather than
the value of a temporary used in a condition. This could be solved by
- recording the value of *every* register scaned by canonicalize_condition,
+ recording the value of *every* register scanned by canonicalize_condition,
but this would require some code reorganization. */
rtx
Index: gimplify.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/gimplify.c,v
retrieving revision 2.121
diff -u -d -p -r2.121 gimplify.c
--- gimplify.c 31 Mar 2005 23:34:38 -0000 2.121
+++ gimplify.c 1 Apr 2005 03:34:44 -0000
@@ -4428,7 +4428,7 @@ gimplify_one_sizepos (tree *expr_p, tree
{
/* We don't do anything if the value isn't there, is constant, or contains
A PLACEHOLDER_EXPR. We also don't want to do anything if it's already
- a VAR_DECL. If it's a VAR_DECL from another function, the gimplfier
+ a VAR_DECL. If it's a VAR_DECL from another function, the gimplifier
will want to replace it with a new variable, but that will cause problems
if this type is from outside the function. It's OK to have that here. */
if (*expr_p == NULL_TREE || TREE_CONSTANT (*expr_p)
Index: lambda-code.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/lambda-code.c,v
retrieving revision 2.30
diff -u -d -p -r2.30 lambda-code.c
--- lambda-code.c 13 Mar 2005 15:54:15 -0000 2.30
+++ lambda-code.c 1 Apr 2005 03:34:46 -0000
@@ -489,7 +489,7 @@ lcm (int a, int b)
}
/* Perform Fourier-Motzkin elimination to calculate the bounds of the
- auxillary nest.
+ auxiliary nest.
Fourier-Motzkin is a way of reducing systems of linear inequalities so that
it is easy to calculate the answer and bounds.
A sketch of how it works:
@@ -800,7 +800,7 @@ lambda_compute_auxillary_space (lambda_l
/* Compute the loop bounds for the target space, using the bounds of
the auxiliary nest AUXILLARY_NEST, and the triangular matrix H.
The target space loop bounds are computed by multiplying the triangular
- matrix H by the auxillary nest, to get the new loop bounds. The sign of
+ matrix H by the auxiliary nest, to get the new loop bounds. The sign of
the loop steps (positive or negative) is then used to swap the bounds if
the loop counts downwards.
Return the target loopnest. */
@@ -1057,8 +1057,8 @@ lambda_compute_step_signs (lambda_trans_
2. Composing the dense base with the specified transformation (TRANS)
3. Decomposing the combined transformation into a lower triangular portion,
and a unimodular portion.
- 4. Computing the auxillary nest using the unimodular portion.
- 5. Computing the target nest using the auxillary nest and the lower
+ 4. Computing the auxiliary nest using the unimodular portion.
+ 5. Computing the target nest using the auxiliary nest and the lower
triangular portion. */
lambda_loopnest
Index: loop.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/loop.c,v
retrieving revision 1.522
diff -u -d -p -r1.522 loop.c
--- loop.c 17 Jan 2005 08:46:15 -0000 1.522
+++ loop.c 1 Apr 2005 03:34:57 -0000
@@ -8904,7 +8904,7 @@ combine_givs (struct loop_regs *regs, st
/* If a DEST_REG GIV is used only once, do not allow it to combine
with anything, for in doing so we will gain nothing that cannot
be had by simply letting the GIV with which we would have combined
- to be reduced on its own. The losage shows up in particular with
+ to be reduced on its own. The lossage shows up in particular with
DEST_ADDR targets on hosts with reg+reg addressing, though it can
be seen elsewhere as well. */
if (g1->giv_type == DEST_REG
Index: machmode.def
===================================================================
RCS file: /cvs/gcc/gcc/gcc/machmode.def,v
retrieving revision 1.29
diff -u -d -p -r1.29 machmode.def
--- machmode.def 15 Oct 2004 14:47:09 -0000 1.29
+++ machmode.def 1 Apr 2005 03:34:58 -0000
@@ -41,7 +41,7 @@ Software Foundation, 59 Temple Place - S
A CLASS argument must be one of the constants defined in
mode-classes.def, less the leading MODE_ prefix; some statements
- that take CLASS arguments have restructions on which classes are
+ that take CLASS arguments have restrictions on which classes are
acceptable. For instance, INT.
A MODE argument must be the printable name of a machine mode,
Index: mips-tfile.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/mips-tfile.c,v
retrieving revision 1.65
diff -u -d -p -r1.65 mips-tfile.c
--- mips-tfile.c 10 Sep 2004 15:09:38 -0000 1.65
+++ mips-tfile.c 1 Apr 2005 03:35:02 -0000
@@ -1746,7 +1746,7 @@ add_string (varray_t *vp, shash_t **hash
\f
/* Add a local symbol. The symbol string starts at STR_START and the
- first byte after it is makred by STR_END_P1. The symbol has type
+ first byte after it is marked by STR_END_P1. The symbol has type
TYPE and storage class STORAGE and value VALUE. INDX is an index
to local/aux. symbols. */
Index: modulo-sched.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/modulo-sched.c,v
retrieving revision 1.23
diff -u -d -p -r1.23 modulo-sched.c
--- modulo-sched.c 21 Mar 2005 18:49:23 -0000 1.23
+++ modulo-sched.c 1 Apr 2005 03:35:04 -0000
@@ -455,7 +455,7 @@ calculate_maxii (ddg_ptr g)
true-dependence of distance 1): SCHED_TIME (def) < SCHED_TIME (use) and
if so generate a register move. The number of such moves is equal to:
SCHED_TIME (use) - SCHED_TIME (def) { 0 broken
- nreg_moves = ----------------------------------- + 1 - { dependecnce.
+ nreg_moves = ----------------------------------- + 1 - { dependence.
ii { 1 if not.
*/
static void
Index: passes.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/passes.c,v
retrieving revision 2.78
diff -u -d -p -r2.78 passes.c
--- passes.c 31 Mar 2005 14:59:51 -0000 2.78
+++ passes.c 1 Apr 2005 03:35:06 -0000
@@ -252,7 +252,7 @@ rest_of_decl_compilation (tree decl,
timevar_pop (TV_SYMOUT);
}
- /* Let cgraph know about the existance of variables. */
+ /* Let cgraph know about the existence of variables. */
if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
cgraph_varpool_node (decl);
}
Index: postreload-gcse.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/postreload-gcse.c,v
retrieving revision 2.10
diff -u -d -p -r2.10 postreload-gcse.c
--- postreload-gcse.c 31 Mar 2005 16:38:24 -0000 2.10
+++ postreload-gcse.c 1 Apr 2005 03:35:07 -0000
@@ -300,7 +300,7 @@ hash_expr_for_htab (const void *expp)
return exp->hash;
}
-/* Callbach for hashtab.
+/* Callback for hashtab.
Return nonzero if exp1 is equivalent to exp2. */
static int
Index: tree-eh.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-eh.c,v
retrieving revision 2.27
diff -u -d -p -r2.27 tree-eh.c
--- tree-eh.c 9 Mar 2005 11:31:51 -0000 2.27
+++ tree-eh.c 1 Apr 2005 03:35:09 -0000
@@ -50,7 +50,7 @@ using_eh_for_cleanups (void)
/* Misc functions used in this file. */
/* Compare and hash for any structure which begins with a canonical
- pointer. Assumes all pointers are interchangable, which is sort
+ pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
static int
Index: tree-ssa-ccp.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-ccp.c,v
retrieving revision 2.60
diff -u -d -p -r2.60 tree-ssa-ccp.c
--- tree-ssa-ccp.c 21 Mar 2005 14:35:07 -0000 2.60
+++ tree-ssa-ccp.c 1 Apr 2005 03:35:11 -0000
@@ -1582,7 +1582,7 @@ maybe_fold_stmt_indirect (tree expr, tre
/* We can get here for out-of-range string constant accesses,
such as "_"[3]. Bail out of the entire substitution search
and arrange for the entire statement to be replaced by a
- call to __builtin_trap. In all likelyhood this will all be
+ call to __builtin_trap. In all likelihood this will all be
constant-folded away, but in the meantime we can't leave with
something that get_expr_operands can't understand. */
Index: varasm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/varasm.c,v
retrieving revision 1.491
diff -u -d -p -r1.491 varasm.c
--- varasm.c 31 Mar 2005 14:59:52 -0000 1.491
+++ varasm.c 1 Apr 2005 03:35:15 -0000
@@ -4421,7 +4421,7 @@ globalize_decl (tree decl)
/* We have to be able to tell cgraph about the needed-ness of the target
of an alias. This requires that the decl have been defined. Aliases
- that preceed their definition have to be queued for later processing. */
+ that precede their definition have to be queued for later processing. */
struct alias_pair GTY(())
{
Index: config/frv/frv.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/frv/frv.c,v
retrieving revision 1.85
diff -u -d -p -r1.85 frv.c
--- config/frv/frv.c 22 Mar 2005 19:19:17 -0000 1.85
+++ config/frv/frv.c 1 Apr 2005 03:35:24 -0000
@@ -781,7 +781,7 @@ frv_override_options (void)
You should not use this macro to change options that are not
machine-specific. These should uniformly selected by the same optimization
- level on all supported machines. Use this macro to enable machbine-specific
+ level on all supported machines. Use this macro to enable machine-specific
optimizations.
*Do not examine `write_symbols' in this macro!* The debugging options are
Index: config/frv/frv.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/frv/frv.h,v
retrieving revision 1.60
diff -u -d -p -r1.60 frv.h
--- config/frv/frv.h 22 Mar 2005 19:19:18 -0000 1.60
+++ config/frv/frv.h 1 Apr 2005 03:35:27 -0000
@@ -351,7 +351,7 @@
You should not use this macro to change options that are not
machine-specific. These should uniformly selected by the same optimization
- level on all supported machines. Use this macro to enable machbine-specific
+ level on all supported machines. Use this macro to enable machine-specific
optimizations.
*Do not examine `write_symbols' in this macro!* The debugging options are
Index: config/frv/frv.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/frv/frv.md,v
retrieving revision 1.31
diff -u -d -p -r1.31 frv.md
--- config/frv/frv.md 7 Feb 2005 08:06:29 -0000 1.31
+++ config/frv/frv.md 1 Apr 2005 03:35:34 -0000
@@ -1206,7 +1206,7 @@
(eq_attr "type" "sqrt_single"))
"(f1|f0) + fr550_float")
-;; Synthetic units for enforcing media issue restructions. Certain types
+;; Synthetic units for enforcing media issue restrictions. Certain types
;; of insn in M2 conflict with certain types in M0:
;;
;; M2
@@ -5913,7 +5913,7 @@
;; Called after register allocation to add any instructions needed for the
;; epilogue. Using an epilogue insn is favored compared to putting all of the
;; instructions in the FUNCTION_EPILOGUE macro, since it allows the scheduler
-;; to intermix instructions with the restires of the caller saved registers.
+;; to intermix instructions with the restores of the caller saved registers.
;; In some cases, it might be necessary to emit a barrier instruction as the
;; first insn to prevent such scheduling.
(define_expand "epilogue"
Index: config/i386/i386.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.c,v
retrieving revision 1.801
diff -u -d -p -r1.801 i386.c
--- config/i386/i386.c 30 Mar 2005 22:27:47 -0000 1.801
+++ config/i386/i386.c 1 Apr 2005 03:35:48 -0000
@@ -4112,7 +4112,7 @@ ix86_compute_frame_layout (struct ix86_f
/* During reload iteration the amount of registers saved can change.
Recompute the value as needed. Do not recompute when amount of registers
- didn't change as reload does mutiple calls to the function and does not
+ didn't change as reload does multiple calls to the function and does not
expect the decision to change within single iteration. */
if (!optimize_size
&& cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
@@ -6030,7 +6030,7 @@ i386_output_dwarf_dtprel (FILE *file, in
}
/* In the name of slightly smaller debug output, and to cater to
- general assembler losage, recognize PIC+GOTOFF and turn it back
+ general assembler lossage, recognize PIC+GOTOFF and turn it back
into a direct symbol reference. */
static rtx
@@ -6137,8 +6137,8 @@ put_condition_code (enum rtx_code code,
suffix = "g";
break;
case GTU:
- /* ??? Use "nbe" instead of "a" for fcmov losage on some assemblers.
- Those same assemblers have the same but opposite losage on cmov. */
+ /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
+ Those same assemblers have the same but opposite lossage on cmov. */
if (mode != CCmode)
abort ();
suffix = fp ? "nbe" : "a";
Index: config/i386/i386.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.h,v
retrieving revision 1.423
diff -u -d -p -r1.423 i386.h
--- config/i386/i386.h 15 Mar 2005 14:44:09 -0000 1.423
+++ config/i386/i386.h 1 Apr 2005 03:35:52 -0000
@@ -1183,7 +1183,7 @@ do { \
This is computed in `reload', in reload1.c. */
#define FRAME_POINTER_REQUIRED ix86_frame_pointer_required ()
-/* Override this in other tm.h files to cope with various OS losage
+/* Override this in other tm.h files to cope with various OS lossage
requiring a frame pointer. */
#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
#define SUBTARGET_FRAME_POINTER_REQUIRED 0
Index: config/i386/i386.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/i386/i386.md,v
retrieving revision 1.621
diff -u -d -p -r1.621 i386.md
--- config/i386/i386.md 29 Mar 2005 05:46:46 -0000 1.621
+++ config/i386/i386.md 1 Apr 2005 03:36:11 -0000
@@ -19357,7 +19357,7 @@
abort ();
/* Use 3dNOW prefetch in case we are asking for write prefetch not
- suported by SSE counterpart or the SSE prefetch is not available
+ supported by SSE counterpart or the SSE prefetch is not available
(K6 machines). Otherwise use SSE prefetch as it allows specifying
of locality. */
if (TARGET_3DNOW && (!TARGET_PREFETCH_SSE || rw))
Index: config/rs6000/predicates.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/predicates.md,v
retrieving revision 1.8
diff -u -d -p -r1.8 predicates.md
--- config/rs6000/predicates.md 24 Mar 2005 23:07:35 -0000 1.8
+++ config/rs6000/predicates.md 1 Apr 2005 03:36:12 -0000
@@ -593,7 +593,7 @@
(match_code "symbol_ref,const,label_ref"))
;; Return 1 if op is a simple reference that can be loaded via the GOT,
-;; exclusing labels involving addition.
+;; excluding labels involving addition.
(define_predicate "got_no_const_operand"
(match_code "symbol_ref,label_ref"))
Index: config/rs6000/rs6000.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/rs6000.c,v
retrieving revision 1.801
diff -u -d -p -r1.801 rs6000.c
--- config/rs6000/rs6000.c 31 Mar 2005 11:32:58 -0000 1.801
+++ config/rs6000/rs6000.c 1 Apr 2005 03:36:29 -0000
@@ -227,7 +227,7 @@ const char *rs6000_debug_name;
int rs6000_debug_stack; /* debug stack applications */
int rs6000_debug_arg; /* debug argument handling */
-/* Value is TRUE if register/mode pair is accepatable. */
+/* Value is TRUE if register/mode pair is acceptable. */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
/* Opaque types. */
@@ -15249,7 +15249,7 @@ static bool
rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost,
int distance)
{
- /* If the flag is not enbled - no dependence is considered costly;
+ /* If the flag is not enabled - no dependence is considered costly;
allow all dependent insns in the same group.
This is the most aggressive option. */
if (rs6000_sched_costly_dep == no_dep_costly)
Index: config/s390/fixdfdi.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/s390/fixdfdi.h,v
retrieving revision 1.4
diff -u -d -p -r1.4 fixdfdi.h
--- config/s390/fixdfdi.h 4 Jul 2003 22:28:57 -0000 1.4
+++ config/s390/fixdfdi.h 1 Apr 2005 03:36:29 -0000
@@ -55,7 +55,7 @@ __fixunsdfdi (double a1)
dl1.d = a1;
- /* +/- 0, denormalized, negativ */
+ /* +/- 0, denormalized, negative */
if (!EXPD (dl1) || SIGND(dl1))
return 0;
@@ -199,7 +199,7 @@ __fixunssfdi (float a1)
fl1.f = a1;
- /* +/- 0, denormalized, negativ */
+ /* +/- 0, denormalized, negative */
if (!EXP (fl1) || SIGN(fl1))
return 0;
Index: config/s390/s390.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/s390/s390.c,v
retrieving revision 1.219
diff -u -d -p -r1.219 s390.c
--- config/s390/s390.c 15 Mar 2005 15:46:52 -0000 1.219
+++ config/s390/s390.c 1 Apr 2005 03:36:37 -0000
@@ -4130,7 +4130,7 @@ s390_output_dwarf_dtprel (FILE *file, in
}
/* In the name of slightly smaller debug output, and to cater to
- general assembler losage, recognize various UNSPEC sequences
+ general assembler lossage, recognize various UNSPEC sequences
and turn them back into a direct symbol reference. */
static rtx
Index: config/stormy16/stormy16.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/stormy16/stormy16.c,v
retrieving revision 1.71
diff -u -d -p -r1.71 stormy16.c
--- config/stormy16/stormy16.c 31 Mar 2005 14:59:58 -0000 1.71
+++ config/stormy16/stormy16.c 1 Apr 2005 03:36:40 -0000
@@ -2658,7 +2658,7 @@ combine_bnp (rtx insn)
if (and)
{
- /* Some mis-optimisations by GCC can generate a RIGHT-SHIFT
+ /* Some mis-optimizations by GCC can generate a RIGHT-SHIFT
followed by an AND like this:
(parallel [(set (reg:HI r7) (lshiftrt:HI (reg:HI r7) (const_int 3)))
Index: config/stormy16/stormy16.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/stormy16/stormy16.md,v
retrieving revision 1.18
diff -u -d -p -r1.18 stormy16.md
--- config/stormy16/stormy16.md 3 Dec 2004 19:06:02 -0000 1.18
+++ config/stormy16/stormy16.md 1 Apr 2005 03:36:41 -0000
@@ -1104,7 +1104,7 @@
;; the epilogue. Using an epilogue insn is favored compared to putting
;; all of the instructions in the TARGET_ASM_FUNCTION_EPILOGUE macro,
;; since it allows the scheduler to intermix instructions with the
-;; restires of the caller saved registers. In some cases, it might be
+;; restores of the caller saved registers. In some cases, it might be
;; necessary to emit a barrier instruction as the first insn to
;; prevent such scheduling.
(define_expand "epilogue"
Index: config/vax/vax.md
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/vax/vax.md,v
retrieving revision 1.30
diff -u -d -p -r1.30 vax.md
--- config/vax/vax.md 27 Mar 2005 10:48:27 -0000 1.30
+++ config/vax/vax.md 1 Apr 2005 03:36:43 -0000
@@ -37,7 +37,7 @@
]
)
-;; Integer modes supported on VAX, withd a mapping from machine mode
+;; Integer modes supported on VAX, with a mapping from machine mode
;; to mnemonic suffix. DImode is always a special case.
(define_mode_macro VAXint [QI HI SI])
(define_mode_attr isfx [(QI "b") (HI "w") (SI "l")])
^ permalink raw reply [flat|nested] 38+ messages in thread
* [patch] gcc/*: Fix comment typos.
@ 2005-02-11 3:21 Kazu Hirata
0 siblings, 0 replies; 38+ messages in thread
From: Kazu Hirata @ 2005-02-11 3:21 UTC (permalink / raw)
To: gcc-patches
Hi,
Committed as obvious.
Kazu Hirata
2005-02-10 Kazu Hirata <kazu@cs.umass.edu>
* cse.c, tree-ssa-loop-ivopts.c, config/rs6000/linux-unwind.h:
Fix comment typos.
Index: cse.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cse.c,v
retrieving revision 1.341
diff -u -d -p -r1.341 cse.c
--- cse.c 7 Feb 2005 17:10:27 -0000 1.341
+++ cse.c 10 Feb 2005 20:10:50 -0000
@@ -335,7 +335,7 @@ static unsigned int cse_reg_info_table_s
static unsigned int cse_reg_info_table_first_uninitialized;
/* The timestamp at the beginning of the current run of
- cse_basic_block. We increment this variable at at the beginning of
+ cse_basic_block. We increment this variable at the beginning of
the current run of cse_basic_block. The timestamp field of a
cse_reg_info entry matches the value of this variable if and only
if the entry has been initialized during the current run of
Index: tree-ssa-loop-ivopts.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/tree-ssa-loop-ivopts.c,v
retrieving revision 2.46
diff -u -d -p -r2.46 tree-ssa-loop-ivopts.c
--- tree-ssa-loop-ivopts.c 10 Feb 2005 19:02:44 -0000 2.46
+++ tree-ssa-loop-ivopts.c 10 Feb 2005 20:10:50 -0000
@@ -647,7 +647,7 @@ struct nfe_cache_elt
/* The edge for that the number of iterations is cached. */
edge exit;
- /* True if the # of iterations was succesfully determined. */
+ /* True if the # of iterations was successfully determined. */
bool valid_p;
/* Description of # of iterations. */
Index: config/rs6000/linux-unwind.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/linux-unwind.h,v
retrieving revision 1.3
diff -u -d -p -r1.3 linux-unwind.h
--- config/rs6000/linux-unwind.h 31 Jan 2005 13:37:37 -0000 1.3
+++ config/rs6000/linux-unwind.h 10 Feb 2005 20:10:50 -0000
@@ -194,7 +194,7 @@ get_regs (struct _Unwind_Context *contex
}
#endif
-/* Find an entry in the process auxilliary vector. The canonical way to
+/* Find an entry in the process auxiliary vector. The canonical way to
test for VMX is to look at AT_HWCAP. */
static long
^ permalink raw reply [flat|nested] 38+ messages in thread
end of thread, other threads:[~2007-10-14 1:40 UTC | newest]
Thread overview: 38+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-07-29 0:52 [patch] gcc/*: Fix comment typos Kazu Hirata
-- strict thread matches above, loose matches on Subject: below --
2007-10-14 2:16 Kazu Hirata
2007-09-01 20:16 Kazu Hirata
2007-07-07 13:38 Kazu Hirata
2007-05-25 23:07 Kazu Hirata
2007-04-15 15:01 Kazu Hirata
2007-03-17 18:11 Kazu Hirata
2007-02-19 3:33 Kazu Hirata
2007-02-18 1:34 Kazu Hirata
2007-02-03 16:48 Kazu Hirata
2007-01-31 4:00 Kazu Hirata
2007-01-31 5:06 ` Brooks Moses
2006-12-22 12:50 Kazu Hirata
2006-12-22 1:20 Kazu Hirata
2006-12-22 1:25 ` Ian Lance Taylor
2006-12-05 8:27 Kazu Hirata
2006-12-02 2:26 Kazu Hirata
2006-12-02 14:26 ` Rask Ingemann Lambertsen
2006-10-29 10:32 Kazu Hirata
2006-06-30 19:34 Kazu Hirata
2006-05-28 19:10 Kazu Hirata
2006-04-22 16:20 Kazu Hirata
2006-04-22 16:32 ` Joseph S. Myers
2006-04-15 21:45 Kazu Hirata
2006-04-08 17:01 Kazu Hirata
2005-12-16 6:09 Kazu Hirata
2005-08-01 3:56 Kazu Hirata
2005-07-29 14:52 Kazu Hirata
2005-07-03 16:12 Kazu Hirata
2005-06-12 14:03 Kazu Hirata
2005-06-03 13:42 Kazu Hirata
2005-05-07 14:46 Kazu Hirata
2005-04-19 22:27 Kazu Hirata
2005-04-13 14:35 Kazu Hirata
2005-04-09 16:43 Kazu Hirata
2005-04-06 17:06 Kazu Hirata
2005-04-01 3:43 Kazu Hirata
2005-02-11 3:21 Kazu Hirata
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).