
Sample initATC.sap (SAP BR*Tools profile for Oracle)
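This is the BR*Tools initialization profile for the Oracle database of SAP system ATC (init<DBSID>.sap). It is read by brbackup, brarchive, brrestore, brrecover, brconnect and brspace, and most of the parameters below can also be overridden with command-line options. As a rough illustration only (assuming the standard BR*Tools options -p, -t, -d, -m and -f, which are not part of the profile itself), a full offline backup to tape and an archive-log save using this profile could be started as:

  brbackup -p initATC.sap -t offline -d tape -m all
  brarchive -p initATC.sap -f save

When only the file name is given, BR*Tools usually looks for the profile in %ORACLE_HOME%\database on Windows (or $ORACLE_HOME/dbs on Unix).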

# @(#) $Id: //bas/700_REL/src/ccm/rsbr/initNT.sap#15 $ SAP
########################################################################
#                                                                      #
# SAP BR*Tools sample profile.                                         #
# The parameter syntax is the same as for init.ora parameters.         #
# Enclose parameter values which consist of more than one symbol in    #
# double quotes.                                                       #
# After any symbol, parameter definition can be continued on the next  #
# line.                                                                #
# A parameter value list should be enclosed in parentheses, the list   #
# items should be delimited by commas.                                 #
# There can be any number of white spaces (blanks, tabs and new lines) #
# between symbols in parameter definition.                             #
# Comment lines must start with a hash character.                      #
#                                                                      #
########################################################################
# backup mode [all | all_data | full | incr | sap_dir | ora_dir
# | all_dir | <tablespace_name> | <file_id> | <file_id1>-<file_id2>
# | <generic_path> | (<object_list>)]
# default: all
backup_mode = all
# restore mode [all | all_data | full | incr | incr_only | incr_full
# | incr_all | <tablespace_name> | <file_id> | <file_id1>-<file_id2>
# | <generic_path> | (<object_list>) | partial | non_db]
# redirection with '=' is not supported here - use option '-m' instead
# default: all
restore_mode = all
# backup type [offline | offline_force | offline_standby | offline_split
# | offline_mirror | offline_stop | online | online_cons | online_split
# | online_mirror | online_standby | offstby_split | offstby_mirror]
# default: offline
backup_type = offline
# backup device type
# [tape | tape_auto | tape_box | pipe | pipe_auto | pipe_box | disk
# | disk_copy | disk_standby | stage | stage_copy | stage_standby
# | util_file | util_file_online | rman_util | rman_disk | rman_stage
# | rman_prep]
# default: tape
backup_dev_type = tape
# backup root directory [<path_name> | (<path_name_list>)]
# default: %SAPDATA_HOME%\sapbackup
backup_root_dir = R:\oracle\ATC\sapbackup
# stage root directory [<path_name> | (<path_name_list>)]
# default: value of the backup_root_dir parameter
stage_root_dir = R:\oracle\ATC\sapbackup
# compression flag [no | yes | hardware | only]
# default: no
compress = no
# compress command
# first $-character is replaced by the source file name
# second $-character is replaced by the target file name
# <target_file_name> = <source_file_name>.Z
# for compress command the -c option must be set
# recommended setting for brbackup -k only run:
# "%SAPEXE%\mkszip -l 0 -c $ > $"
# no default
compress_cmd = "S:\usr\sap\ATC\SYS\exe\uc\NTAMD64\mkszip -c $ > $"
# uncompress command
# first $-character is replaced by the source file name
# second $-character is replaced by the target file name
# <source_file_name> = <target_file_name>.Z
# for uncompress command the -c option must be set
# no default
uncompress_cmd = "S:\usr\sap\ATC\SYS\exe\uc\NTAMD64\uncompress -c $ > $"
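# illustrative expansion of the two $-placeholders above (the data file
# name is hypothetical, not taken from this profile):
# compress:   mkszip -c G:\data\btabd.data1 > G:\data\btabd.data1.Z
# uncompress: uncompress -c G:\data\btabd.data1.Z > G:\data\btabd.data1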
# directory for compression [<path_name> | (<path_name_list>)]
# default: value of the backup_root_dir parameter
compress_dir = R:\oracle\ATC\sapreorg
# brarchive function [save | second_copy | double_save | save_delete
# | second_copy_delete | double_save_delete | copy_save
# | copy_delete_save | delete_saved | delete_copied]
# default: save
archive_function = save
# directory for archive log copies to disk
# default: first value of the backup_root_dir parameter
archive_copy_dir = R:\oracle\ATC\sapbackup
# directory for archive log copies to stage
# default: first value of the stage_root_dir parameter
archive_stage_dir = R:\oracle\ATC\sapbackup
# delete archive logs from duplex destination [only | no | yes | check]
# default: only
# archive_dupl_del = only
# new sapdata home directory for disk_copy | disk_standby
# no default
# new_db_home = X:\oracle\C11
# stage sapdata home directory for stage_copy | stage_standby
# default: value of the new_db_home parameter
# stage_db_home = /oracle/C11
# original sapdata home directory for split mirror disk backup
# no default
# orig_db_home = /oracle/C11
# remote host name
# no default
# remote_host = <host_name>
# remote user name
# default: current operating system user
# remote_user = <user_name>
# tape copy command [cpio | cpio_gnu | dd | dd_gnu | rman | rman_gnu
# | rman_dd | rman_dd_gnu]
# default: cpio
tape_copy_cmd = cpio
# disk copy command [copy | copy_gnu | dd | dd_gnu | rman | rman_gnu
# | rman_set | rman_set_gnu | ocopy]
# default: copy
disk_copy_cmd = copy
# stage copy command [rcp | scp | ftp]
# default: rcp
stage_copy_cmd = rcp
# pipe copy command [rsh | ssh]
# default: rsh
pipe_copy_cmd = rsh
# flags for cpio output command
# default: -ovB
cpio_flags = -ovB
# flags for cpio input command
# default: -iuvB
cpio_in_flags = -iuvB
# flags for cpio command for copy of directories to disk
# default: -pdcu
# use flags -pdu for gnu tools
cpio_disk_flags = -pdcu
# flags for dd output command
# default: "obs=16k"
# caution: option "obs=" not supported for Windows
# recommended setting:
# Unix:    "obs=nk bs=nk", example: "obs=64k bs=64k"
# Windows: "bs=nk",        example: "bs=64k"
dd_flags = "bs=64k"
# flags for dd input command
# default: "ibs=16k"
# caution: option "ibs=" not supported for Windows
# recommended setting:
# Unix:    "ibs=nk bs=nk", example: "ibs=64k bs=64k"
# Windows: "bs=nk",        example: "bs=64k"
dd_in_flags = "bs=64k"
# number of members in RMAN save sets [ 1 | 2 | 3 | 4 | tsp | all ]
# default: 1
saveset_members = 1
# additional parameters for RMAN
# rman_channels and rman_filesperset are only used when rman_util,
# rman_disk or rman_stage
# rman_channels defines the number of parallel sbt channel allocations
# rman_filesperset = 0 means:
# one file per save set - for non-incremental backups
# all files in one save set - for incremental backups
# the others have the same meaning as for native RMAN
# rman_channels = 1
# rman_filesperset = 0
# rman_maxpiecesize = 0   # in KB - former name rman_kbytes
# rman_rate = 0           # in KB - former name rman_readrate
# rman_maxopenfiles = 0
# rman_maxsetsize = 0     # in KB - former name rman_setsize
# additional parameters for RMAN version 8.1
# the parameters have the same meaning as for native RMAN
# rman_diskratio = 0      # deprecated in Oracle 10g
# rman_pool = 0
# rman_copies = 0 | 1 | 2 | 3 | 4 # former name rman_duplex
# rman_proxy = no | yes | only
# special parameters for an external backup library, example:
# rman_parms = "BLKSIZE=65536 ENV=(BACKUP_SERVER=HOSTNAME)"
# rman_send = "'<command>'"
# rman_send = ("channel sbt_1 '<command1>' parms='<parameters1>'",
#              "channel sbt_2 '<command2>' parms='<parameters2>'")
# rman_compress = no | yes
# rman_maxcorrupt = (<dbf_name>|<dbf_id>:<corr_cnt>, ...)
# remote copy-out command (backup_dev_type = pipe)
# $-character is replaced by current device address
# no default
copy_out_cmd = "dd ibs=8k obs=64k of=$"
# remote copy-in command (backup_dev_type = pipe)
# $-character is replaced by current device address
# no default
copy_in_cmd = "dd ibs=64k obs=8k if=$"
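# illustrative expansion of the $-placeholder above (the remote device
# address /dev/nmt0 is only an example, see tape_address below):
# copy-out (write to remote tape):  dd ibs=8k obs=64k of=/dev/nmt0
# copy-in  (read from remote tape): dd ibs=64k obs=8k if=/dev/nmt0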
# rewind command
# $-character is replaced by current device address
# no default
# operating system dependent, examples:
# HP-UX:   "mt -f $ rew"
# TRU64:   "mt -f $ rewind"
# AIX:     "tctl -f $ rewind"
# Solaris: "mt -f $ rewind"
# Windows: "mt -f $ rewind"
# Linux:   "mt -f $ rewind"
rewind = "mt -f $ rewind"
# rewind and set offline command
# $-character is replaced by current device address
# default: value of the rewind parameter
# operating system dependent, examples:
# HP-UX:   "mt -f $ offl"
# TRU64:   "mt -f $ offline"
# AIX:     "tctl -f $ offline"
# Solaris: "mt -f $ offline"
# Windows: "mt -f $ offline"
# Linux:   "mt -f $ offline"
rewind_offline = "mt -f $ offline"
# tape positioning command
# first $-character is replaced by current device address
# second $-character is replaced by number of files to be skipped
# no default
# operating system dependent, examples:
# HP-UX:   "mt -f $ fsf $"
# TRU64:   "mt -f $ fsf $"
# AIX:     "tctl -f $ fsf $"
# Solaris: "mt -f $ fsf $"
# Windows: "mt -f $ fsf $"
# Linux:   "mt -f $ fsf $"
tape_pos_cmd = "mt -f $ fsf $"
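# illustrative expansion of the two $-placeholders above (device address
# and file count are only examples): skipping 3 files on /dev/nmt0 runs
# mt -f /dev/nmt0 fsf 3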
# mount backup volume command in auto loader / juke box
# used if backup_dev_type = tape_box | pipe_box
# no default
# mount_cmd = "<mount_cmd> $ $ $ [$]"
# dismount backup volume command in auto loader / juke box
# used if backup_dev_type = tape_box | pipe_box
# no default
# dismount_cmd = "<dismount_cmd> $ $ [$]"
# split mirror disks command
# used if backup_type = offline_split | online_split | offline_mirror
# | online_mirror
# no default
# split_cmd = "<split_cmd> [$]"
# resynchronize mirror disks command
# used if backup_type = offline_split | online_split | offline_mirror
# | online_mirror
# no default
# resync_cmd = "<resync_cmd> [$]"
# additional options for SPLITINT interface program
# no default
# split_options = "<split_options>"
# resynchronize after backup flag [no | yes]
# default: no
# split_resync = no
# pre-split command
# no default
# pre_split_cmd = "<pre_split_cmd>"
# post-split command
# no default
# post_split_cmd = "<post_split_cmd>"
# pre-shut command
# no default
# pre_shut_cmd = "<pre_shut_cmd>"
# post-shut command
# no default
# post_shut_cmd = "<post_shut_cmd>"
# volume size in KB = K, MB = M or GB = G (backup device dependent)
# default: 1200M
# recommended values for tape devices without hardware compression:
# 60 m   4 mm  DAT DDS-1 tape:    1200M
# 90 m   4 mm  DAT DDS-1 tape:    1800M
# 120 m  4 mm  DAT DDS-2 tape:    3800M
# 125 m  4 mm  DAT DDS-3 tape:   11000M
# 112 m  8 mm  Video tape:        2000M
# 112 m  8 mm  high density:      4500M
# DLT 2000     10/20 GB:         10000M
# DLT 2000XT   15/30 GB:         15000M
# DLT 4000     20/40 GB:         20000M
# DLT 7000     35/70 GB:         35000M
# recommended values for tape devices with hardware compression:
# 60 m   4 mm  DAT DDS-1 tape:    1000M
# 90 m   4 mm  DAT DDS-1 tape:    1600M
# 120 m  4 mm  DAT DDS-2 tape:    3600M
# 125 m  4 mm  DAT DDS-3 tape:   10000M
# 112 m  8 mm  Video tape:        1800M
# 112 m  8 mm  high density:      4300M
# DLT 2000     10/20 GB:          9000M
# DLT 2000XT   15/30 GB:         14000M
# DLT 4000     20/40 GB:         18000M
# DLT 7000     35/70 GB:         30000M
tape_size = 100G
# volume size in KB = K, MB = M or GB = G used by brarchive
# default: value of the tape_size parameter
# tape_size_arch = 100G
# level of parallel execution
# default: 0 - set to number of backup devices
exec_parallel = 0
# address of backup device without rewind
# [<dev_address> | (<dev_address_list>)]
# no default
# operating system dependent, examples:
# HP-UX:   /dev/rmt/0mn
# TRU64:   /dev/nrmt0h
# AIX:     /dev/rmt0.1
# Solaris: /dev/rmt/0mn
# Windows: /dev/nmt0 | /dev/nst0
# Linux:   /dev/nst0
tape_address = /dev/nmt0
# address of backup device without rewind used by brarchive
# default: value of the tape_address parameter
# operating system dependent
# tape_address_arch = /dev/nmt0
# address of backup device with rewind
# [<dev_address> | (<dev_address_list>)]
# no default
# operating system dependent, examples:
# HP-UX:   /dev/rmt/0m
# TRU64:   /dev/rmt0h
# AIX:     /dev/rmt0
# Solaris: /dev/rmt/0m
# Windows: /dev/mt0 | /dev/st0
# Linux:   /dev/st0
tape_address_rew = /dev/mt0
# address of backup device with rewind used by brarchive
# default: value of the tape_address_rew parameter
# operating system dependent
# tape_address_rew_arch = /dev/mt0
# address of backup device with control for mount/dismount command
# [<dev_address> | (<dev_address_list>)]
# default: value of the tape_address_rew parameter
# operating system dependent
# tape_address_ctl = /dev/...
# address of backup device with control for mount/dismount command
# used by brarchive
# default: value of the tape_address_rew_arch parameter
# operating system dependent
# tape_address_ctl_arch = /dev/...
# volumes for brarchive
# [<volume_name> | (<volume_name_list>) | SCRATCH]
# no default
volume_archive = (ATCA01, ATCA02, ATCA03, ATCA04, ATCA05,
                  ATCA06, ATCA07, ATCA08, ATCA09, ATCA10,
                  ATCA11, ATCA12, ATCA13, ATCA14, ATCA15,
                  ATCA16, ATCA17, ATCA18, ATCA19, ATCA20,
                  ATCA21, ATCA22, ATCA23, ATCA24, ATCA25,
                  ATCA26, ATCA27, ATCA28, ATCA29, ATCA30)
# volumes for brbackup
# [<volume_name> | (<volume_name_list>) | SCRATCH]
# no default
volume_backup = (ATCB01, ATCB02, ATCB03, ATCB04, ATCB05,
                 ATCB06, ATCB07, ATCB08, ATCB09, ATCB10,
                 ATCB11, ATCB12, ATCB13, ATCB14, ATCB15,
                 ATCB16, ATCB17, ATCB18, ATCB19, ATCB20,
                 ATCB21, ATCB22, ATCB23, ATCB24, ATCB25,
                 ATCB26, ATCB27, ATCB28, ATCB29, ATCB30)
# expiration period for backup volumes in days
# default: 30
expir_period = 30
# recommended usages of backup volumes
# default: 100
tape_use_count = 100
# backup utility parameter file
# default: no parameter file
# util_par_file = initATC.utl
# mount/dismount command parameter file
# default: no parameter file
# mount_par_file = initATC.mnt
# Oracle connection name to the primary database
# [primary_db = <conn_name> | LOCAL]
# no default
# primary_db = <conn_name>
# Oracle connection name to the standby database
# [standby_db = <conn_name> | LOCAL]
# no default
# standby_db = <conn_name>
# description of parallel instances for Oracle RAC
# parallel_instances = <inst_desc> | (<inst_desc_list>)
# <inst_desc_list>   - <inst_desc>[,<inst_desc>...]
# <inst_desc>        - <Oracle_sid>:<Oracle_home>@<conn_name>
# <Oracle_sid>       - Oracle system id for parallel instance
# <Oracle_home>      - Oracle home for parallel instance
# <conn_name>        - Oracle connection name to parallel instance
# Please include the local instance in the parameter definition!
# default: no parallel instances
# example for initRAC001.sap:
# parallel_instances = (RAC001:/oracle/RAC/920_64@RAC001,
# RAC002:/oracle/RAC/920_64@RAC002, RAC003:/oracle/RAC/920_64@RAC003)
# handling of Oracle RAC database services [no | yes]
# default: no
# db_services = yes
# database owner of objects to be checked
# <owner> | (<owner_list>)
# default: all SAP owners
# check_owner = sapr3
# database objects to be excluded from checks
# all_part | non_sap | [<owner>.]<table> | [<owner>.]<index>
# | [<owner>.][<prefix>]*[<suffix>] | <tablespace> | (<object_list>)
# default: no exclusion, example:
# check_exclude = (SDBAH, SAPR3.SDBAD)
# special database check conditions
# ("<type>:<cond>:<active>:<sever>:[<chkop>]:[<chkval>]:[<unit>]", ...)
# check_cond = (<cond_list>)
# database owner of SDBAH, SDBAD and XDB tables for cleanup
# <owner> | (<owner_list>)
# default: all SAP owners
# cleanup_owner = sapr3
# retention period in days for brarchive log files
# default: 30
# cleanup_brarchive_log = 30
# retention period in days for brbackup log files
# default: 30
# cleanup_brbackup_log = 30
# retention period in days for brconnect log files
# default: 30
# cleanup_brconnect_log = 30
# retention period in days for brrestore log files
# default: 30
# cleanup_brrestore_log = 30
# retention period in days for brrecover log files
# default: 30
# cleanup_brrecover_log = 30
# retention period in days for brspace log files
# default: 30
# cleanup_brspace_log = 30
# retention period in days for archive log files saved on disk
# default: 30
# cleanup_disk_archive = 30
# retention period in days for database files backed up on disk
# default: 30
# cleanup_disk_backup = 30
# retention period in days for brspace export dumps and scripts
# default: 30
# cleanup_exp_dump = 30
# retention period in days for Oracle trace and audit files
# default: 30
# cleanup_ora_trace = 30
# retention period in days for records in SDBAH and SDBAD tables
# default: 100
# cleanup_db_log = 100
# retention period in days for records in XDB tables
# default: 100
# cleanup_xdb_log = 100
# retention period in days for database check messages
# default: 100
# cleanup_check_msg = 100
# database owner of objects to adapt next extents
# <owner> | (<owner_list>)
# default: all SAP owners
# next_owner = sapr3
# database objects to adapt next extents
# all | all_ind | special | [<owner>.]<table> | [<owner>.]<index>
# | [<owner>.][<prefix>]*[<suffix>] | <tablespace> | (<object_list>)
# default: all objects of selected owners, example:
# next_table = (SDBAH, SAPR3.SDBAD)
# database objects to be excluded from adapting next extents
# all_part | [<owner>.]<table> | [<owner>.]<index>
# | [<owner>.][<prefix>]*[<suffix>] | <tablespace> | (<object_list>)
# default: no exclusion, example:
# next_exclude = (SDBAH, SAPR3.SDBAD)
# database objects to get special next extent size
# all_sel:<size>[/<limit>] | [<owner>.]<table>:<size>[/<limit>]
# | [<owner>.]<index>:<size>[/<limit>]
# | [<owner>.][<prefix>]*[<suffix>]:<size>[/<limit>]
# | (<object_size_list>)
# default: according to table category, example:
# next_special = (SDBAH:100K, SAPR3.SDBAD:1M/200)
# maximum next extent size
# default: 2 GB - 5 * <database_block_size>
# next_max_size = 1G
# maximum number of next extents
# default: 0 - unlimited
# next_limit_count = 300
# database owner of objects to update statistics
# <owner> | (<owner_list>)
# default: all SAP owners
# stats_owner = sapr3
# database objects to update statistics
# all | all_ind | all_part | missing | info_cubes | dbstatc_tab
# | dbstatc_mon | dbstatc_mona | [<owner>.]<table> | [<owner>.]<index>
# | [<owner>.][<prefix>]*[<suffix>] | <tablespace> | (<object_list>)
# | harmful | locked | system_stats | oradict_stats | oradict_tab
# default: all objects of selected owners, example:
# stats_table = (SDBAH, SAPR3.SDBAD)
# database objects to be excluded from updating statistics
# all_part | info_cubes | [<owner>.]<table> | [<owner>.]<index>
# | [<owner>.][<prefix>]*[<suffix>] | <tablespace> | (<object_list>)
# default: no exclusion, example:
# stats_exclude = (SDBAH, SAPR3.SDBAD)
# method for updating statistics for tables not in DBSTATC
# E | EH | EI | EX | C | CH | CI | CX | A | AH | AI | AX | E= | C= | =H
# | =I | =X | +H | +I
# default: according to internal rules
# stats_method = E
# sample size for updating statistics for tables not in DBSTATC
# P<percentage_of_rows> | R<thousands_of_rows>
# default: according to internal rules
# stats_sample_size = P10
# number of buckets for updating statistics with histograms
# default: 75
# stats_bucket_count = 75
# threshold for collecting statistics after checking
# default: 50%
# stats_change_threshold = 50
# number of parallel threads for updating statistics
# default: 1
# stats_parallel_degree = 1
# processing time limit in minutes for updating statistics
# default: 0 - no limit
# stats_limit_time = 0
# parameters for calling DBMS_STATS supplied package
# all:R|B[<buckets>|A|S|R]:0|<degree>A|D
# | all_part:R|B[<buckets>|A|S|R]:0|<degree>A|D
# | info_cubes:R|B:A|D|0|<degree>
# | [<owner>.]<table>:R|B[<buckets>|A|S|R]:0|<degree>A|D
# | [<owner>.][<prefix>]*[<suffix>]:R|B[<buckets>|A|S|R]:0|<degree>A|D
# | (<object_list>) | NO
# R|B[<buckets>|A|S|R]:
# 'R' - row sampling, 'B' - block sampling,
# <buckets> - histogram buckets count, 'A' - auto buckets count,
# 'S' - skew only, 'R' - repeat
# <degree>A|D:
# <degree> - dbms_stats parallel degree, '0' - table degree,
# 'A' - auto degree, 'D' - default degree
# default: ALL:R:0
# stats_dbms_stats = ([ALL:R:1,][<owner>.]<table>:R:<degree>,...)
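# illustrative (hypothetical) setting following the syntax above: row
# sampling with the table's own degree for all objects, block sampling
# with 75 histogram buckets and parallel degree 4 for one table:
# stats_dbms_stats = (ALL:R:0, SAPR3.MSEG:B75:4)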
# definition of info cube tables
# default | rsnspace_tab | [<owner>.]<table>
# | [<owner>.][<prefix>]*[<suffix>] | (<object_list>) | null
# default: rsnspace_tab
# stats_info_cubes = (/BIC/D*, /BI0/D*, ...)
# special statistics settings
# (<table>:[<owner>]:<active>:[<method>]:[<sample>], ...)
# stats_special = (<special_list>)
# recovery type [complete | dbpit | tspit | reset | restore | apply
# | disaster]
# default: complete
# recov_type = complete
# directory for brrecover file copies
# default: $SAPDATA_HOME/sapbackup
# recov_copy_dir = R:\oracle\ATC\sapbackup
# time period for searching for backups
# 0 - all available backups, >0 - backups from n last days
# default: 30
# recov_interval = 30
# degree of parallelism for applying archive log files
# 0 - use Oracle default parallelism, 1 - serial, >1 - parallel
# default: Oracle default
# recov_degree = 0
# number of lines for scrolling in list menus
# 0 - no scrolling, >0 - scroll n lines
# default: 20
# scroll_lines = 20
# time period for displaying profiles and logs
# 0 - all available logs, >0 - logs from n last days
# default: 30
# show_period = 30
# directory for brspace file copies
# default: $SAPDATA_HOME/sapreorg
# space_copy_dir = R:\oracle\ATC\sapreorg
# directory for table export dump files
# default: $SAPDATA_HOME/sapreorg
# exp_dump_dir = R:\oracle\ATC\sapreorg
# database tables for reorganization
# [<owner>.]<table> | [<owner>.][<prefix>]*[<suffix>]
# | [<owner>.][<prefix>]%[<suffix>] | (<table_list>)
# no default
# reorg_table = (SDBAH, SAPR3.SDBAD)
# database indexes for rebuild
# [<owner>.]<index> | [<owner>.][<prefix>]*[<suffix>]
# | [<owner>.][<prefix>]%[<suffix>] | (<index_list>)
# no default
# rebuild_index = (SDBAH~0, SAPR3.SDBAD~0)
# database tables for export
# [<owner>.]<table> | [<owner>.][<prefix>]*[<suffix>]
# | [<owner>.][<prefix>]%[<suffix>] | (<table_list>)
# no default
# exp_table = (SDBAH, SAPR3.SDBAD)
# database tables for import
# <table> | (<table_list>)
# no default
# do not specify table owner in the list - use -o|-owner option for this
# imp_table = (SDBAH, SDBAD)
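
As a rough guide to which tool reads which group of parameters: the check_*, cleanup_*, next_* and stats_* parameters are evaluated by brconnect, the reorg_*, rebuild_*, exp_* and imp_* parameters by brspace, and the recov_* parameters by brrecover. Illustrative brconnect calls using this profile (standard BR*Tools options, not shown in the original profile) could look like:

  brconnect -p initATC.sap -u / -f check
  brconnect -p initATC.sap -u / -f stats -t all
  brconnect -p initATC.sap -u / -f cleanup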
