Question

I am relatively new to R and am looking for the best way to calculate a frequency distribution from a vector (most likely numeric, but not always), complete with the Frequency, Relative Frequency, Cumulative Frequency, and Cumulative Relative Frequency for each value. Below is the logic I came up with, but it seems like a bit much for such a routine task. I appreciate your feedback.

x <- c(1,2,3,2,4,2,5,4,6,7,8,9)

# counts per distinct value
freq <- data.frame(table(x))

# proportions per distinct value, renamed to Relative_Freq
relFreq <- data.frame(prop.table(table(x)))
relFreq$Relative_Freq <- relFreq$Freq
relFreq$Freq <- NULL

Cumulative_Freq <- cumsum(table(x))

# join the two tables on x, then append the cumulative columns
z <- cbind(merge(freq, relFreq), Cumulative_Freq)
z$Cumulative_Relative_Freq <- z$Cumulative_Freq / sum(z$Freq)

print(z)

Solution 2

I don't know your exact application, but it seems unnecessary to repeat the summary rows for each repeated x value. If that isn't needed, you can avoid the merge:

x <- c(1,2,3,2,4,2,5,4,6,7,8,9)
Freq <- table(x)
relFreq <- prop.table(Freq)
Cumulative_Freq <- cumsum(Freq)
Cumulative_Relative_Freq <- cumsum(relFreq)
# as.vector() strips the table class so each argument becomes a single column
data.frame(xval = names(Freq),
           Freq = as.vector(Freq),
           relFreq = as.vector(relFreq),
           Cumulative_Freq = Cumulative_Freq,
           Cumulative_Relative_Freq = Cumulative_Relative_Freq)
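For the sample x above, this should print something like (relative frequencies shown at R's default precision):

  xval Freq    relFreq Cumulative_Freq Cumulative_Relative_Freq
1    1    1 0.08333333               1               0.08333333
2    2    3 0.25000000               4               0.33333333
3    3    1 0.08333333               5               0.41666667
4    4    2 0.16666667               7               0.58333333
5    5    1 0.08333333               8               0.66666667
6    6    1 0.08333333               9               0.75000000
7    7    1 0.08333333              10               0.83333333
8    8    1 0.08333333              11               0.91666667
9    9    1 0.08333333              12               1.00000000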

Another way to accomplish the same thing:

require(plyr)
x <- c(1,2,3,2,4,2,5,4,6,7,8,9)
z <- data.frame(table(x))
# plyr's mutate() evaluates its arguments sequentially,
# so cumsum(relFreq) can use the relFreq column created just before it
mutate(z, relFreq = prop.table(Freq), Cumulative_Freq = cumsum(Freq),
       Cumulative_Relative_Freq = cumsum(relFreq))
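If you'd rather not depend on plyr, the same sequential-column trick works in base R with within(), which (unlike transform()) lets later expressions use columns created earlier in the same call. A minimal sketch:

x <- c(1,2,3,2,4,2,5,4,6,7,8,9)
z <- data.frame(table(x))
z <- within(z, {
  relFreq <- prop.table(Freq)
  Cumulative_Freq <- cumsum(Freq)
  Cumulative_Relative_Freq <- cumsum(relFreq)
})
z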

Other tips

The qdap package contains dist_tab to do this:

library(qdap)
dist_tab(x)

##   interval Freq cum.Freq percent cum.percent
## 1        1    1        1    8.33        8.33
## 2        2    3        4   25.00       33.33
## 3        3    1        5    8.33       41.67
## 4        4    2        7   16.67       58.33
## 5        5    1        8    8.33       66.67
## 6        6    1        9    8.33       75.00
## 7        7    1       10    8.33       83.33
## 8        8    1       11    8.33       91.67
## 9        9    1       12    8.33      100.00

Try the fdth package:

library(fdth)

tb1 <- fdt(x) # The breaks are based on the Sturges criterion (by default)
summary(tb1)
# Class limits f   rf rf(%) cf  cf(%)
#  [0.99,2.61) 4 0.33 33.33  4  33.33
#  [2.61,4.23) 3 0.25 25.00  7  58.33
#  [4.23,5.85) 1 0.08  8.33  8  66.67
#  [5.85,7.47) 2 0.17 16.67 10  83.33
#  [7.47,9.09) 2 0.17 16.67 12 100.00

tb2 <- fdt(x, start=1, end=10, h=1) # end=10 so the largest value, 9, falls inside [9,10)
summary(tb2)
# Class limits f   rf rf(%) cf  cf(%)
#        [1,2) 1 0.08  8.33  1   8.33
#        [2,3) 3 0.25 25.00  4  33.33
#        [3,4) 1 0.08  8.33  5  41.67
#        [4,5) 2 0.17 16.67  7  58.33
#        [5,6) 1 0.08  8.33  8  66.67
#        [6,7) 1 0.08  8.33  9  75.00
#        [7,8) 1 0.08  8.33 10  83.33
#        [8,9) 1 0.08  8.33 11  91.67
#       [9,10) 1 0.08  8.33 12 100.00
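If you don't want a package at all, a rough base-R equivalent of the unit-width table above can be sketched with cut(), assuming the breaks cover all values:

x <- c(1,2,3,2,4,2,5,4,6,7,8,9)
# left-closed unit-width classes [1,2), [2,3), ..., [9,10)
cl <- cut(x, breaks = 1:10, right = FALSE)
f <- table(cl)
data.frame(f,
           rf = as.vector(prop.table(f)),
           cf = cumsum(as.vector(f)),
           cf_pct = 100 * cumsum(as.vector(prop.table(f))))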

I found this approach to be simple for a basic frequency table:

frequency <- c(9, 26, 11, 13, 3, 1, 2)
# number of classes
n <- 7
# number of boundaries = all lower limits plus one more
table1 <- data.frame(frequency)
table1
#   frequency
# 1         9
# 2        26
# 3        11
# 4        13
# 5         3
# 6         1
# 7         2
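The relative and cumulative columns from the question can then be bolted on in the same way; a small sketch reusing table1 from above:

table1$relative_freq <- table1$frequency / sum(table1$frequency)
table1$cumulative_freq <- cumsum(table1$frequency)
table1$cumulative_relative_freq <- cumsum(table1$relative_freq)
table1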

For the frequency distribution of a variable with many distinct values, you can collapse the values into classes.

Here the employrate variable has many distinct values, and a frequency distribution over the raw values with value_counts(normalize=True) is not meaningful:

                country  employrate alcconsumption
0           Afghanistan   55.700001            .03
1               Albania   11.000000           7.29
2               Algeria   11.000000            .69
3               Andorra         nan          10.17
4                Angola   75.699997           5.57
..                  ...         ...            ...
208             Vietnam   71.000000           3.91
209  West Bank and Gaza   32.000000               
210         Yemen, Rep.   39.000000             .2
211              Zambia   61.000000           3.56
212            Zimbabwe   66.800003           4.96

[213 rows x 3 columns]

The frequency distribution from value_counts(sort=False, normalize=True) with no classification has length 139 here, which seems meaningless as a frequency distribution:

print(gm["employrate"].value_counts(sort=False,normalize=True))

50.500000   0.005618
61.500000   0.016854
46.000000   0.011236
64.500000   0.005618
63.500000   0.005618

58.599998   0.005618
63.799999   0.011236
63.200001   0.005618
65.599998   0.005618
68.300003   0.005618
Name: employrate, Length: 139, dtype: float64

Putting in a classification, we map all values within a certain range to a single class, matching the code below:

values up to 20 as 1,
21-30 as 2,
31-40 as 3, and so forth.
gm["employrate"]=gm["employrate"].str.strip().dropna()  
gm["employrate"]=pd.to_numeric(gm["employrate"])
gm['employrate'] = np.where(
   (gm['employrate'] <=10) & (gm['employrate'] > 0) , 1, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=20) & (gm['employrate'] > 10) , 1, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=30) & (gm['employrate'] > 20) , 2, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=40) & (gm['employrate'] > 30) , 3, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=50) & (gm['employrate'] > 40) , 4, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=60) & (gm['employrate'] > 50) , 5, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=70) & (gm['employrate'] > 60) , 6, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=80) & (gm['employrate'] > 70) , 7, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=90) & (gm['employrate'] > 80) , 8, gm['employrate']
   )
gm['employrate'] = np.where(
   (gm['employrate'] <=100) & (gm['employrate'] > 90) , 9, gm['employrate']
   )
print(gm["employrate"].value_counts(sort=False,normalize=True))

After classification, we have a clear frequency distribution. Here we can easily see that 37.64% of countries have an employment rate in the 51-60% band and 11.79% of countries are in the 71-80% band:

5.000000   0.376404
7.000000   0.117978
4.000000   0.179775
6.000000   0.264045
8.000000   0.033708
3.000000   0.028090
Name: employrate, dtype: float64

Or you can do it this way as well:

gm.loc[(gm['employrate'] <= 50) & (gm['employrate'] > 40), 'employrate'] = 4

Here the informal syntax is:

<dataset>.loc[<filter1> & (<filter2>),'<variable>']='<value>'