Knowing when, and why, cron jobs fail can be really difficult. My solution is to use a wrapper script that archives all successful cron job runs for some time and sends an email to admin staff when something fails.
Both successful and failed runs leave a trace file on disk that can be read to understand what the job did and when. On failure the trace is emailed to admin staff, so they do not need to log in to the server to get an initial idea of what, if anything, should be done. Notice that when things fail it is usually best to read the notification email from bottom to top.
#!/bin/bash
#
# Description of your script.
#
# Cron-job wrapper: trace every run to a file under ${CRONJOBLOGDIR},
# keep the traces for 7 days, and mail the trace to ${MAILTO} when the
# job fails non-interactively.
#
# <Begin do not touch header>
# Default settings.
# The trap ERR and pipefail are bashisms, do not change shebang.
JOB_NAME="${0##*/}"
set -e
trap 'echo "${JOB_NAME}: exit on error"; exit 1' ERR
set -u
set -o pipefail
# Report errors by email only when shell is not interactive.
if [ -t 1 ]; then
  # Terminal exists, user is running the script manually. Do
  # not initiate error reporting.
  # NOTE: use JOB_NAME (basename) here for consistency with the
  # default trap above; ${0} would print the full invocation path.
  trap 'echo "${JOB_NAME}: exit on error"; exit 1' ERR
else
  # No terminal: assume cron. Capture all output (stdout + stderr,
  # plus the set -x trace) into a private temp file.
  OUTPUTFILE=$(mktemp "/tmp/${JOB_NAME}.XXXXXXXXX")
  exec > "${OUTPUTFILE}" 2>&1
  set -x
  MAILTO="system-admin-maillist@example.com"
  CRONJOBLOGDIR="/tmp/${JOB_NAME}"
  SERVER=$(hostname -s)
  TIMESTAMP=$(date --iso=ns)   # GNU date; nanosecond ISO-8601 stamp
  # On error, mail the captured trace to the admins. Single quotes
  # defer expansion to trap time; every expansion is quoted so the
  # command survives paths or addresses containing spaces.
  trap 'mailx -s "${SERVER}: ${JOB_NAME} failed" "${MAILTO}" < "${OUTPUTFILE}"' ERR
  # On any exit (success or failure) archive the trace file.
  trap 'mv "${OUTPUTFILE}" "${CRONJOBLOGDIR}/${JOB_NAME}.${TIMESTAMP}"' 0
  # mkdir -p is idempotent; no existence check needed.
  mkdir -p "${CRONJOBLOGDIR}"
  # Prune archived traces older than 7 days.
  find "${CRONJOBLOGDIR}" -name "${JOB_NAME}.*" -type f -mtime +7 -delete
fi
# </End do not touch header>
#
# WRITE YOUR SCRIPT HERE
#
exit 0
# EOF