You can use itertools.groupby here. Note that groupby only merges *consecutive* items with equal keys, so this assumes the file is already sorted (or at least contiguous) by its first column:
from itertools import groupby
def dedupe_groups(in_path, out_path):
    """Filter *in_path* into *out_path*, grouping consecutive lines by their
    first whitespace-separated column and keeping, within each group, only
    the lines whose last two columns have not been seen yet.

    Each kept line is written to *out_path* (newline-normalized) and also
    echoed to stdout.
    """
    with open(in_path) as f1, open(out_path, 'w') as f2:
        # Group the data by the first column.  groupby only merges
        # *adjacent* lines, so the input must already be sorted (or at
        # least contiguous) by that column.
        for _, group in groupby(f1, key=lambda line: line.split()[0]):
            # During the iteration over each group, store only those lines
            # that have a unique 3rd and 4th column.  A set of the seen
            # (col3, col4) tuples lets us skip repeats; it resets per group.
            seen = set()
            for line in group:
                columns = tuple(line.rsplit(None, 2)[-2:])
                if columns not in seen:
                    # The 3rd and 4th column were unique here, so record
                    # them as seen and write the line out.
                    seen.add(columns)
                    f2.write(line.rstrip() + '\n')
                    print(line.rstrip())


if __name__ == '__main__':
    dedupe_groups('input.txt', 'f_out')
Output:
con20 EMT20540 951 1580
con20 EMT14935 975 1655
con20 EMT19916 975 1652
con20 EMT09010 975 1649
con20 EMT09009 975 1637