Attachment 'e2spt_classaverage_patch.py'

   1 #!/usr/bin/python2.7
   2 
   3 #
   4 # Author: Jesus Galaz-Montoya 03/2011, 
   5 # (based on Steven Ludtke's initial implementation [02/15/2011] of Jesus's older scripts, from M.F.Schmid's methods).
   6 # Last modification: July/08/2015
   7 #
   8 # Copyright (c) 2011 Baylor College of Medicine
   9 #
  10 # This software is issued under a joint BSD/GNU license. You may use the
  11 # source code in this file under either license. However, note that the
  12 # complete EMAN2 and SPARX software packages have some GPL dependencies,
  13 # so you are responsible for compliance with the licenses of these packages
  14 # if you opt to use BSD licensing. The warranty disclaimer below holds
  15 # in either instance.
  16 #
  17 # This complete copyright notice must be included in any revised version of the
  18 # source code. Additional authorship citations may be added, but existing
  19 # author citations must be preserved.
  20 #
  21 # This program is free software; you can redistribute it and/or modify
  22 # it under the terms of the GNU General Public License as published by
  23 # the Free Software Foundation; either version 2 of the License, or
  24 # (at your option) any later version.
  25 #
  26 # This program is distributed in the hope that it will be useful,
  27 # but WITHOUT ANY WARRANTY; without even the implied warranty of
  28 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  29 # GNU General Public License for more details.
  30 #
  31 # You should have received a copy of the GNU General Public License
  32 # along with this program; if not, write to the Free Software
   33 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  34 #
  35 #
  36 
  37 from EMAN2 import *
  38 import math
  39 import numpy
  40 from copy import deepcopy
  41 import os
  42 import sys
  43 import random
  44 from random import choice
  45 from pprint import pprint
  46 from EMAN2jsondb import JSTask,jsonclasses
  47 import datetime
  48 import gc 	#this will be used to free-up unused memory
  49 
  50 from e2spt_hac import textwriter
  51 
  52 
  53 
  54 
  55 def main():
  56 	progname = os.path.basename(sys.argv[0])
  57 	usage = """prog <output> [options]
  58 
  59 	This program produces iterative class-averages akin to those generated by e2classaverage, 
  60 	but for stacks of 3-D Volumes.
  61 	Normal usage is to provide a stack of particle volumes and a classification matrix file 
  62 	(if you have more than one class) defining class membership. 
  63 	Members of each class are then iteratively aligned to each other (within the class) and 
  64 	averaged together. 
  65 	It is also possible to use this program on all of the volumes in a single stack without
  66 	providing a classification matrix.
  67 
   68 	Specify preprocessing options through --lowpass, --highpass, --mask, --normproc, --threshold, 
  69 	--preprocess and --shrink. These take EMAN2 processors (to see a list, type e2help.py processors at
  70 	the command line).
  71 	
   72 	Notice that the alignment is broken down into two steps: 1) Coarse alignment and 2) Fine 
   73 	alignment. This is done for speed optimization. By default, the particles are preprocessed
   74 	THE SAME way for Coarse and Fine alignment, unless you supply --notprocfinelikecoarse.
  75 	In this case, the particles will be preprocessed with default parameters for fine alignment.
  76 	To specify or inactivate any preprocessing for fine alignment, do so through fine 
  77 	alignment parameters:
  78 	--lowpassfine, --highpassfine, --preprocessfine and --shrinkfine.
  79 	
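      	An illustrative invocation (hypothetical file names; adjust parameters to your data) might be:
      	
      	e2spt_classaverage.py --input=subtomograms.hdf --ref=initial_model.hdf --iter=3 --mask=mask.soft:outer_radius=-4 --parallel=thread:8 --savesteps --saveali
      	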
  80 	"""
  81 			
  82 	parser = EMArgumentParser(usage=usage,version=EMANVERSION)
  83 	
  84 	parser.add_header(name="caheader", help="""Options below this label are specific to sptclassaverage""", title="### sptclassaverage options ###", row=3, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
  85 	
  86 	parser.add_argument("--path",type=str,default='spt',help="""Default=spt. Directory to store results in. The default is a numbered series of directories containing the prefix 'spt'; for example, spt_02 will be the directory by default if 'spt_01' already exists.""")
  87 	
  88 	parser.add_argument("--input", type=str, default='',help="""Default=None. The name of the input volume stack. MUST be HDF since volume stack support is required.""", guitype='filebox', browser='EMSubTomosTable(withmodal=True,multiselect=False)', row=0, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
  89 	
  90 	#parser.add_argument("--output", type=str, default='avg.hdf', help="""Default=avg.hdf. The name of the output class-average stack. MUST be HDF since volume stack support is required.""", guitype='strbox', row=2, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
  91 	
   92 	parser.add_argument("--radius", type=float, default=0, help="""Default=0 (which means it's not used by default). Hydrodynamic radius of the particle in Angstroms. This will be used to automatically calculate the angular steps to use in search of the best alignment. Make sure the apix is correct on the particles' headers, since the radius will be converted from Angstroms to pixels. Then, the fine angular step is equal to 360/(2*pi*radius), and the coarse angular step 4 times that.""")
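      	#illustrative example for --radius (hypothetical numbers): with apix=4.4 A/pix and --radius=132 A,
      	#the radius is 30 pixels, so the fine angular step would be 360/(2*pi*30) ~ 1.9 degrees and the
      	#coarse angular step ~4x that, i.e., ~7.6 degrees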
  93 	
   94 	parser.add_argument("--precision",type=float,default=1.0,help="""Default=1.0. Precision in pixels to use when figuring out alignment parameters automatically using --radius. Precision would be the number of pixels that the edge of the specimen is moved (rotationally) during the finest sampling, --falign. If precision is 1, then the precision of alignment will be that of the sampling (apix of your images) times the --shrinkfine factor specified.""")
  95 	
   96 	parser.add_argument("--search", type=int,default=8,help="""Default=8. During COARSE alignment, translational search in X, Y and Z, in pixels. This WILL overwrite any 'search' value provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
  97 	
  98 	#parser.add_argument("--searchx", type=int,default=8,help=""""Default=0. Not used. During COARSE alignment translational search in X, Y and Z, in pixels. Default=8. This WILL overwrite any search: provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
  99 
 100 	#parser.add_argument("--searchy", type=int,default=8,help=""""Default=8. During COARSE alignment translational search in X, Y and Z, in pixels. Default=8. This WILL overwrite any search: provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
 101 
 102 	#parser.add_argument("--searchz", type=int,default=8,help=""""Default=8. During COARSE alignment translational search in X, Y and Z, in pixels. Default=8. This WILL overwrite any search: provided through --align, EXCEPT if you provide --search=8, which is the default. In general, just avoid providing search twice (through here and through the aligner, --align). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
 103 	
  104 	parser.add_argument("--searchfine", type=int,default=2,help="""Default=2. During FINE alignment, translational search in X, Y and Z, in pixels. This WILL overwrite any 'search' value provided through --falign, EXCEPT if you provide --searchfine=2, which is the default. In general, just avoid providing search twice (through here and through the fine aligner --falign). If you do, just be careful to make them consistent to minimize misinterpretation and error.""")
 105 		
 106 	parser.add_argument("--iter", type=int, default=1, help="""Default=1. The number of iterations to perform.""", guitype='intbox', row=5, col=0, rowspan=1, colspan=1, nosharedb=True, mode='alignment,breaksym')
 107 	
 108 	parser.add_argument("--savesteps",action="store_true", default=False, help="""Default=False. If set, this will save the average after each iteration to class_#.hdf. Each class in a separate file. Appends to existing files.""", guitype='boolbox', row=4, col=0, rowspan=1, colspan=1, mode='alignment,breaksym')
 109 	
 110 	parser.add_argument("--saveali",action="store_true", default=False, help="""Default=False. If set, this will save the aligned particle volumes in class_ptcl.hdf. Overwrites existing file.""", guitype='boolbox', row=4, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')
 111 	
 112 	parser.add_argument("--saveallalign",action="store_true", default=False, help="""Default=False. If set, this will save the alignment parameters after each iteration""", guitype='boolbox', row=4, col=2, rowspan=1, colspan=1, mode='alignment,breaksym')
 113 	
 114 	parser.add_argument("--saveallpeaks",action="store_true", default=False, help="""Default=False. If set, this will save the alignment information and score for all examined peaks --npeakstorefine during coarse alignment.""")
 115 	
 116 	parser.add_argument("--sym", type=str,dest = "sym", default='', help = """Default=None (equivalent to c1). Symmetry to impose -choices are: c<n>, d<n>, h<n>, tet, oct, icos""", guitype='symbox', row=9, col=1, rowspan=1, colspan=2, mode='alignment,breaksym')
 117 	
 118 	parser.add_argument("--mask",type=str,default="mask.soft:outer_radius=-4", help="""Default is mask.soft:outer_radius=-4. Masking processor applied to particles before alignment. IF using --clipali, make sure to express outer mask radii as negative pixels from the edge.""", returnNone=True, guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'mask\')', row=11, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 119 	
 120 	parser.add_argument("--maskfile",type=str,default='',help="""Default=None. Mask file (3D IMAGE) applied to particles before alignment. Must be in HDF format. Default is None.""")
 121 	
 122 	parser.add_argument("--normproc",type=str, default='normalize.edgemean',help="""Default is 'normalize.edgemean' (see 'e2help.py processors -v 10' at the command line). Normalization processor applied to particles before alignment. If normalize.mask is used, results of the mask option will be passed in automatically. If you want to turn this option off specify \'None\'""")
 123 	
 124 	parser.add_argument("--threshold",type=str,default='',help="""Default=None. A threshold applied to the subvolumes after normalization. For example, --threshold=threshold.belowtozero:minval=0 makes all negative pixels equal 0, so that they do not contribute to the correlation score.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=10, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 125 	
 126 	parser.add_argument("--preprocess",type=str,default='',help="""Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=10, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 127 	
 128 	parser.add_argument("--preprocessfine",type=str,default='',help="""Any processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")
 129 	
 130 	parser.add_argument("--lowpass",type=str,default='filter.lowpass.gauss:cutoff_freq=0.05',help="""Default=filter.lowpass.gauss:cutoff_freq=0.05. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=17, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 131 	
 132 	parser.add_argument("--lowpassfine",type=str,default='filter.lowpass.gauss:cutoff_freq=0.1',help="""Default=filter.lowpass.gauss:cutoff_freq=0.1. A lowpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")
 133 
 134 	parser.add_argument("--highpass",type=str,default='',help="""Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to COARSE alignment. Not applied to aligned particles before averaging.""", guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=18, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 135 	
 136 	parser.add_argument("--highpassfine",type=str,default='',help="""Default=None. A highpass filtering processor (see 'e2help.py processors -v 10' at the command line) to be applied to each volume prior to FINE alignment. Not applied to aligned particles before averaging.""")
 137 
 138 	parser.add_argument("--shrink", type=int,default=1,help="""Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for coarse alignment.""", guitype='shrinkbox', row=5, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')
 139 	
  140 	parser.add_argument("--shrinkfine", type=int,default=1,help="""Default=1 (no shrinking). Optionally shrink the input volumes by an integer amount for fine alignment.""", guitype='intbox', row=5, col=2, rowspan=1, colspan=1, mode='alignment')
 141 	
 142 	parser.add_argument("--clipali",type=int,default=0,help="""Default=0 (which means it's not used). Boxsize to clip particles as part of preprocessing to speed up alignment. For example, the boxsize of the particles might be 100 pixels, but the particles are only 50 pixels in diameter. Aliasing effects are not always as deleterious for all specimens, and sometimes 2x padding isn't necessary; still, there are some benefits from 'oversampling' the data during averaging; so you might still want an average of size 2x, but perhaps particles in a box of 1.5x are sufficiently good for alignment. In this case, you would supply --clipali=75""")
 143 	
 144 	parser.add_argument("--postprocess",type=str,default='',help="""Default=None. A processor to be applied to the FINAL volume after averaging the raw volumes in their FINAL orientations, after all iterations are done.""",guitype='comboparambox', choicelist='re_filter_list(dump_processors_list(),\'filter\')', row=16, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 145 	
  146 	parser.add_argument("--procfinelikecoarse",action='store_true',default=False,help="""Default=False. If you supply this parameter, particles for fine alignment will be preprocessed identically to particles for coarse alignment. If instead you want to preprocess the particles for FINE alignment differently than for COARSE alignment, supply the fine alignment parameters, such as --lowpassfine, --highpassfine, etc.""")
 147 	
 148 	parser.add_argument("--npeakstorefine", type=int, help="""Default=1. The number of best coarse alignments to refine in search of the best final alignment.""", default=1, guitype='intbox', row=9, col=0, rowspan=1, colspan=1, nosharedb=True, mode='alignment,breaksym[1]')
 149 	
  150 	parser.add_argument("--align",type=str,default="rotate_translate_3d:search=8:delta=16:dphi=16",help="""This is the aligner used to align particles to the previous class average. Default is rotate_translate_3d:search=8:delta=16:dphi=16; specify 'None' (with capital N) to disable.""", returnNone=True,guitype='comboparambox', choicelist='re_filter_list(dump_aligners_list(),\'3d\')', row=12, col=0, rowspan=1, colspan=3, nosharedb=True, mode="alignment,breaksym['rotate_symmetry_3d']")
 151 	
  152 	parser.add_argument("--aligncmp",type=str,default="ccc.tomo",help="""Default=ccc.tomo. The comparator used for the --align aligner. Do not specify unless you need to use another specific comparator.""",guitype='comboparambox',choicelist='re_filter_list(dump_cmps_list(),\'tomo\')', row=13, col=0, rowspan=1, colspan=3,mode="alignment,breaksym")
 153 
 154 
 155 	#parser.add_argument("--ialign",type=str,default="refine_3d_grid:delta=4:range=8:search=4",help="""Default="refine_3d_grid:delta=3:range=8:search=4". This is the second stage aligner used for intermediate alignment. Specify 'None' to disable.""", returnNone=True, guitype='comboparambox', choicelist='re_filter_list(dump_aligners_list(),\'refine.*3d\')', row=14, col=0, rowspan=1, colspan=3, nosharedb=True, mode='alignment,breaksym[None]')
 156 			
 157 	#parser.add_argument("--ialigncmp",type=str,default="ccc.tomo",help="""Default=ccc.tomo. The comparator used by the second stage aligner.""", guitype='comboparambox', choicelist='re_filter_list(dump_cmps_list(),\'tomo\')', row=15, col=0, rowspan=1, colspan=3,mode="alignment,breaksym")
 158 
 159 	
 160 	parser.add_argument("--falign",type=str,default="refine_3d_grid:delta=2:range=4:search=2",help="""Default="refine_3d_grid:delta=2:range=4:search=2". This is the second stage aligner used to fine-tune the first alignment. Specify 'None' to disable.""", returnNone=True, guitype='comboparambox', choicelist='re_filter_list(dump_aligners_list(),\'refine.*3d\')', row=14, col=0, rowspan=1, colspan=3, nosharedb=True, mode='alignment,breaksym[None]')
 161 			
 162 	parser.add_argument("--faligncmp",type=str,default="ccc.tomo",help="""Default=ccc.tomo. The comparator used by the second stage aligner.""", guitype='comboparambox', choicelist='re_filter_list(dump_cmps_list(),\'tomo\')', row=15, col=0, rowspan=1, colspan=3,mode="alignment,breaksym")		
 163 
 164 	parser.add_argument("--translateonly",action='store_true',default=False,help="""Default=False. This will force the aligner to not do any rotations and thus serves for translational centering. Specify search values through --search, otherwise its default value will be used.""")	
 165 		
 166 	parser.add_argument("--filterbyfsc",action='store_true',default=False,help="""If on, this parameter will use dynamic FSC filtering. --lowpass will be used to build initial references if no --ref supplied, then, the FSC between the even and odd initial references will be used to filter the data during preprocessing. If --ref is supplied, --lowpass will be used during the first iteration to align the particles against the reference. Thereafter, the FSC between the most current particle average and the original reference (--ref) will be used in the next iteration.""")
 167 	
 168 	parser.add_argument("--averager",type=str,default="mean.tomo",help="""Default=mean.tomo. The type of averager used to produce the class average. Default=mean.tomo.""")
 169 	
 170 	parser.add_argument("--inputaliparams",type=str,default="",help="""Default=None. .json file containing a dict of transforms to apply to 'pre-align' the particles.""", guitype='dirbox', dirbasename='spt_|sptsym_', row=7, col=0,rowspan=1, colspan=2, nosharedb=True, mode='breaksym')
 171 	
 172 	parser.add_argument("--breaksym",action="store_true", default=False,help="""Default=False. Break symmetry. Do not apply symmetrization after averaging, even if searching the asymmetric unit provided through --sym only for alignment. Default=False""", guitype='boolbox', row=7, col=2, rowspan=1, colspan=1, nosharedb=True, mode=',breaksym[True]')
 173 	
 174 	#parser.add_argument("--groups",type=int,default=0,help="""Default=0 (not used; data not split). This parameter will split the data into a user defined number of groups. For purposes of gold-standard FSC computation later, select --group=2.""")
 175 		
 176 	parser.add_argument("--randomizewedge",action="store_true",  default=False,help="""Default=False. This parameter is EXPERIMENTAL. It randomizes the position of the particles BEFORE alignment, to minimize missing wedge bias and artifacts during symmetric alignment where only a fraction of space is scanned""")
 177 	
 178 	parser.add_argument("--savepreprocessed",action="store_true",  default=False,help="""Default=False. Will save stacks of preprocessed particles (one for coarse alignment and one for fine alignment if preprocessing options are different).""")
 179 	
  180 	parser.add_argument("--autocenter",type=str, default='',help="""WARNING: Experimental. Default=None. Autocenters each averaged pair during initial average generation with --btref and --hacref. Will also autocenter the average of all particles after each iteration of iterative refinement. Options are --autocenter=xform.centerofmass (self descriptive), or --autocenter=xform.centeracf, which centers based on the autocorrelation of the average.""")
 181 	
 182 	parser.add_argument("--autocentermask",type=str, default='',help="""WARNING: Experimental. Requires --autocenter. Default=None. Masking processor to apply before autocentering. See 'e2help.py processors -v 10' at the command line.""")
 183 	
 184 	parser.add_argument("--autocenterpreprocess",action='store_true', default=False,help="""WARNING: Experimental. Requires --autocenter. Default=False. This will apply a highpass filter at a frequency of half the box size times the apix, shrink by 2, and apply a low pass filter at half nyquist frequency to any computed average for autocentering purposes if --autocenter is provided. Default=False.""")
 185 	
 186 	parser.add_argument("--parallel",default="thread:1",help="""default=thread:1. Parallelism. See http://blake.bcm.edu/emanwiki/EMAN2/Parallel""", guitype='strbox', row=19, col=0, rowspan=1, colspan=3, mode='alignment,breaksym')
 187 	
 188 	parser.add_argument("--ppid", type=int, help="""Default=-1. Set the PID of the parent process, used for cross platform PPID""",default=-1)
 189 	
  190 	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, default=0, help="""Default=0. Verbose level [0-9], higher number means higher level of verbosity""")
 191 		
  192 	parser.add_argument("--resume",type=str,default='',help="""(Not working currently). sptali_ir.json file that contains alignment information for the particles in the set. If the information is incomplete (i.e., there are fewer elements in the file than particles in the stack), on the first iteration the program will complete the file by working ONLY on particle indexes that are missing. For subsequent iterations, all the particles will be used.""")
 193 															
  194 	parser.add_argument("--plots", action='store_true', default=False,help="""Default=False. Turn this option on to generate a plot of the ccc scores during each iteration in .png format (otherwise only .txt files will be saved). This option will also produce a plot of mean ccc score across iterations. Running on a cluster or via ssh remotely might not support plotting.""")
 195 
  196 	parser.add_argument("--subset",type=int,default=0,help="""Default=0 (not used). Refine only this subset of particles from the stack provided through --input""")
 197 
 198 	parser.add_argument("--apix",type=float,default=0.0,help="""Default=0.0 (not used). Use this apix value where relevant instead of whatever is in the header of the reference and the particles.""")
 199 	
  200 	parser.add_argument("--notmatchimgs",action='store_true',default=False,help="""Default=False. This option prevents applying filter.match.to to one image so that it matches the other's spectral profile during preprocessing for alignment purposes.""")
 201 	
  202 	parser.add_argument("--preavgproc1",type=str,default='',help="""Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highpass filter if you have phase plate data).""")
 203 	
  204 	parser.add_argument("--preavgproc2",type=str,default='',help="""Default=None. A processor (see 'e2help.py processors -v 10' at the command line) to be applied to the raw particle after alignment but before averaging (for example, a threshold to exclude extreme values, or a highpass filter if you have phase plate data).""")
 205 
  206 	parser.add_argument("--weighbytiltaxis",type=str,default='',help="""Default=None. A,B, where A is an integer and B a decimal. A represents the location of the tilt axis in the tomogram in pixels (e.g., for a 4096x4096xZ tomogram, this value should be 2048), and B is the weight of the particles furthest from the tilt axis. For example, --weighbytiltaxis=2048,0.5 means that particles at the tilt axis (with an x coordinate of 2048) will have a weight of 1.0 during averaging, while the distance in x of particles not on the tilt axis will be used to weigh their contribution to the average, with particles at the edge (0+radius or 4096-radius) weighing 0.5, as specified by the value provided for B.""")
 207 
 208 	parser.add_argument("--weighbyscore",action='store_true',default=False,help="""Default=False. This option will weigh the contribution of each subtomogram to the average by score/bestscore.""")
 209 
 210 	#parser.add_argument("--automask",action="store_true",help="""Applies a 3-D automask before centering. Can help with negative stain data, and other cases where centering is poor.""")
 211 	
 212 	#parser.add_argument("--resample",action="store_true",help="""If set, will perform bootstrap resampling on the particle data for use in making variance maps.""",default=False)
 213 
 214 	'''
 215 	CLASSAVERAGE SPECIFIC PARAMETERS
 216 	'''
 217 	
 218 	parser.add_argument("--goldstandardoff", action="store_true", default=False, help="""Default=False. This will PREVENT splitting the dataset provided through --input into two groups, and the entire dataset will be refined together. If this parameter is NOT supplied (and thus the refinement is 'gold standard') and --ref is supplied, two copies of the reference will be generated and randomphase-lowpass filtered to the resolution specified through --refrandphase.""")
 219 
 220 	parser.add_argument("--classmx", type=str, default='', help="""Default=None. The name of the classification matrix specifying how particles in 'input' should be grouped. If omitted, all particles will be averaged.""")
 221 	
 222 	parser.add_argument("--recompute",action='store_true',default=False,help="""default=False. This parameter requires --classmx and will recompute averages (for example, even and odd) based on the classmx file and the alignment parameters specified therein for each particle. No refinements will follow. This is exclusively for recomputing averages.""")
 223 	
 224 	parser.add_argument("--donotaverage",action="store_true", default=False,help="""Default=False. If e2spt_refinemulti.py is calling e2spt_classaverage.py, the latter need not average any particles, but rather only yield the alignment results.""")
 225 	
 226 	parser.add_argument("--ref", type=str, default='', help="""Default=None. Reference image. Used as an initial alignment reference. The refinements are 'gold standard' by default, and therefore two independent copies of the reference will be generated and randomphase-lowpass filtered to the resolution specified through --refrandphase. To turn dataset splitting and gold standard refinement off, supply --goldstandardoff.""", guitype='filebox', browser='EMBrowserWidget(withmodal=True,multiselect=True)', filecheck=False, row=1, col=0, rowspan=1, colspan=3, mode='alignment')
 227 	
 228 	parser.add_argument("--refpreprocess",action="store_true",default=False,help="""Default=False. This will preprocess the reference identically to the particles. It is off by default, but it is internally turned on when no reference is supplied. It should probably be off when using a crystal structure (with all positive densities) turned to EM density as an initial model, but it should be on when using an EM map.""")
 229 	
  230 	parser.add_argument("--refrandphase", type=float, default=60.0, help="""Default=60.0. Resolution to phase randomize the reference to (or the two copies of the reference if --goldstandardoff is NOT supplied [gold standard refinement is on by default]).""")
 231 	
 232 	parser.add_argument("--hacref",type=int,default=0,help="""Default=0 (not used by default). Size of the SUBSET of particles to use to build an initial reference by calling e2spt_hac.py which does Hierarchical Ascendant Classification (HAC) or 'all vs all' alignments.""") 
 233 		
 234 	parser.add_argument("--ssaref",type=int,default=0,help="""Default=0 (not used by default). Size of the SUBSET of particles to use to build an initial reference by calling e2symsearch3d.py, which does self-symmetry alignments. You must provide --sym different than c1 for this to make any sense.""")
 235 		
 236 	parser.add_argument("--btref",type=int,default=0,help="""Default=0 (internally turned on and set to 64). Size of the SUBSET of particles to use to build an initial reference by calling e2spt_binarytree.py. By default, the largest power of two smaller than the number of particles in --input will be used. For example, if you supply a stack with 150 subtomograms, the program will automatically select 128 as the limit to use because it's the largest power of 2 that is smaller than 150. But if you provide, say --btref=100, then the number of particles used will be 64, because it's the largest power of 2 that is still smaller than 100.""")
 237 	
 238 	parser.add_argument("--resultmx",type=str,default=None,help="""Default=None. Specify an output image to store the result matrix. This is in the same format as the classification matrix. http://blake.bcm.edu/emanwiki/EMAN2/ClassmxFiles""")
 239 	
 240 	parser.add_argument("--refinemultireftag", type=str, default='', help="""Default=''. DO NOT USE THIS PARAMETER. It is passed on from e2spt_refinemulti.py if needed.""")
 241 
 242 	parser.add_argument("--keep",type=float,default=1.0,help="""Default=1.0 (all particles kept). The fraction of particles to keep in each class.""", guitype='floatbox', row=6, col=0, rowspan=1, colspan=1, mode='alignment,breaksym')
 243 	
 244 	parser.add_argument("--keepsig", action="store_true", default=False,help="""Default=False. Causes the keep argument to be interpreted in standard deviations.""", guitype='boolbox', row=6, col=1, rowspan=1, colspan=1, mode='alignment,breaksym')
 245 
 246 	parser.add_argument("--tweak",action='store_true',default=False,help="""WARNING: BUGGY. This will perform a final alignment with no downsampling [without using --shrink or --shrinkfine] if --shrinkfine > 1.""")
 247 	
 248 	(options, args) = parser.parse_args()
 249 	
 250 	print "Initially, options.goldstandardoff is", options.goldstandardoff, type(options.goldstandardoff)
 251 	
 252 	'''
  253 	Get rootpath to provide absolute paths to files. 
 254 	Make the directory where to create the database where the results will be stored, 
 255 	if --resume is not provided.
 256 	'''
 257 	
 258 	if options.recompute:
 259 		options.align = 'rotate_translate_3d_tree'
 260 	
 261 	if 'tree' in options.align:
 262 		options.falign = None
 263 		options.mask = None
 264 		options.lowpass = None
 265 		options.highpass = None
 266 		options.normproc = None
 267 		options.lowpassfine = None
 268 		options.highpassfine = None
 269 
 270 	rootpath = os.getcwd()
 271 
 272 	if not options.resume:
 273 		options = sptmakepath(options,'spt')	
 274 	else:
 275 		if rootpath not in options.resume:
 276 			options.resume = rootpath + '/' + options.resume
 277 	
 278 		if not options.path:
 279 			print """\nERROR: If you provide --resume, you need to specify which working 
 280 			directory needs to be resumed. Provide it through --path"""			
 281 			sys.exit()
 282 
 283 	if rootpath not in options.path:
 284 		options.path = rootpath + '/' + options.path
 285 		
 286 	#abspath= rootpath + '/' + options.path
 287 	#print "\nThus the abs path could be", abspath
 288 	
  289 	if not options.input:
  290 		parser.print_help()
  291 		exit(0)
  292 	
  293 	nptcl = EMUtil.get_image_count(options.input)
  294 	if nptcl < 2:
  295 		options.goldstandardoff = True
  296 	
  297 	if options.subset:
 298 		#print "there is subset!", options.subset
 299 		
 300 		if options.subset < nptcl:
 301 			#print "smaller than the number of particles", nptcl
 302 			
 303 			if options.goldstandardoff:
 304 				print "goldstandard is off"
 305 				if options.subset < 2:
 306 					if not options.ref:
  307 						print "ERROR: You need at least 2 particles in --input to build a reference if --ref is not provided."
 308 						sys.exit()	
 309 			else:
 310 				print "goldstandard is on"
 311 				if options.subset < 4:
  312 					print "ERROR: You need at least 4 particles in --input for goldstandard refinement if --ref is not provided and --goldstandardoff not provided."
 313 					sys.exit()
 314 				
 315 				if options.hacref and options.subset < options.hacref * 2:
 316 					print """WARNING: --subset=%d wasn't large enough to accommodate gold standard
 317 					refinement with two independent halves using the specified number of particles
 318 					for initial model generation --hacref=%d. Therefore, --hacref will be reset
 319 					to --subset/2.""" %( options.subset, options.hacref )				
 320 					options.hacref = options.subset / 2			
 321 			
 322 				elif options.ssaref and options.subset < options.ssaref * 2:		
 323 					print """WARNING: --subset=%d wasn't large enough to accommodate gold standard
 324 					refinement with two independent halves using the specified number of particles
 325 					for initial model generation --ssaref=%d. Therefore, --ssaref will be reset
 326 					to --subset/2.""" %( options.subset, options.ssaref )
 327 					options.ssaref = options.subset / 2	
 328 			
 329 				elif options.btref and options.subset < options.btref * 2:			
 330 					print """WARNING: --subset=%d wasn't large enough to accommodate gold standard
 331 					refinement with two independent halves using the specified number of particles
 332 					for initial model generation --btref=%d. Therefore, --btref has been reset
 333 					to --subset/2.""" %( options.subset, options.btref )
 334 					options.btref = options.subset / 2
 335 					
 336 			subsetStack = options.path + '/subset' + str( options.subset ).zfill( len( str( options.subset))) + '.hdf' 
 337 			print "\nSubset to be written to", subsetStack
 338 		
 339 			subsetcmd = 'e2proc3d.py ' + options.input + ' ' + subsetStack + ' --first=0 --last=' + str(options.subset-1) 
 340 			print "Subset cmd is", subsetcmd
 341 		
 342 			runcmd( options, subsetcmd )
 343 			#p=subprocess.Popen( subsetcmd, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE )
 344 			#text=p.communicate()	
 345 			#p.stdout.close()
 346 		
 347 			options.input = subsetStack
 348 			nptcl = EMUtil.get_image_count(options.input)
 349 			print "\nnptcl for subset is now", nptcl
 350 			
 351 		else:
  352 			print """\n(e2spt_classaverage)(main) WARNING: --subset was larger than or equal to the number of particles in --input. 
 353 			Therefore, using all particles."""
 354 	else:
 355 		if not options.ref:
 356 			if not options.goldstandardoff:
 357 				if nptcl < 4:
  358 					print "ERROR: You need at least 4 particles in --input for goldstandard refinement if --ref is not provided and --goldstandardoff not provided."
 359 					sys.exit()
 360 				else:
 361 					pass
 362 			else:
 363 				if nptcl < 2:
  364 					print "ERROR: You need at least 2 particles in --input to build a reference (or to --recompute and average) if --ref is not provided."
 365 					sys.exit()
 366 		#else:
 367 		#	print "ref is", options.ref
 368 					
 369 	if not options.translateonly:
 370 		if options.search or options.searchfine:
 371 			options = sptParseAligner( options )
 372 		
  373 			print "aligner parsed:", options.align
 374 	else:
 375 		options.align = 'rotate_translate_3d_grid:phi0=0:phi1=1:alt0=0:alt1=1:az0=0:az1=1:dphi=2:daz=2:dalt=2'
 376 		
 377 		if options.search:
 378 			options.align += ':search=' + str(options.search)
 379 			
 380 		#if options.searchx:
 381 		#	options.align += ':searchx=' + str(options.searchx)
 382 		
 383 		#if options.searchy:
 384 		#	options.align += ':searchy=' + str(options.searchy)
 385 		
 386 		#if options.searchz:
 387 		#	options.align += ':searchz=' + str(options.searchz)
 388 	
 389 	if options.shrink < options.shrinkfine:
 390 		options.shrink = options.shrinkfine
 391 		print "WARNING: It makes no sense for shrinkfine to be larger than shrink; therefore, shrink will be made to match shrinkfine"
 392 	
 393 	print "after fixing searches but before calcali options.falign is", options.falign
 394 	if options.radius and float(options.radius) > 0.0:
 395 		#print "(e2spt_classaverage)(main) before calling calcAliStep, options.input is", options.input
 396 		options = calcAliStep(options)
 397 	
 398 	'''
  399 	Parse parameters such that "None" or "none" are adequately interpreted to turn off an option
 400 	'''
 401 	options = sptOptionsParser( options )
 402 	
 403 	print "after parsing options, options.goldstandardoff is", options.goldstandardoff, type(options.goldstandardoff)
 404 	print options.align, type(options.align)
 405 		
 406 	if options.resultmx: 
 407 		print "\nSorry, resultmx not implemented yet"
 408 	
 409 	#????
 410 	if options.resultmx != None: 
 411 		options.storebad = True
 412 					
 413 	ptclshdr = EMData(options.input,0,True)
 414 	nx = ptclshdr["nx"]
 415 	ny = ptclshdr["ny"]
 416 	nz = ptclshdr["nz"]
 417 	if nx!=ny or ny!=nz :
 418 		print "ERROR, input volumes are not cubes"
 419 		sys.exit(1)
 420 	
 421 	if options.ref:
 422 		refhdr = EMData( options.ref, 0, True )
 423 		if refhdr["nx"]!=nx or refhdr["ny"]!=ny or refhdr["nz"]!=nz : 
 424 			print """(e2spt_classaverage)(main). 
 425 			ERROR: ref volume size %d, %d, %d, not the same as input volume(s)' size, 
 426 			%d, %d, %d""" %(refhdr["nx"], refhdr["ny"], refhdr["nz"],nx, ny, nz)
 427 			sys.exit(1)
 428 	
 429 	#if not options.donotaverage:		
 430 		#if '.hdf' not in options.output:					
 431 		#	print "(e2spt_classaverage)(main) - Error in output name. Format must be '.hdf'; make sure you didn't mistake a comma for a dot"
 432 		#	sys.exit(1)
 433 	#	pass
 434 		
 435 	logger = E2init(sys.argv, options.ppid)
 436 	
 437 	'''
 438 	Store parameters in parameters.txt file inside --path
 439 	'''
 440 	cmdwp = writeParameters(options,'e2spt_classaverage.py', 'sptclassavg')
 441 	
 442 	preprocnameCoarse = preprocnameFine = ''
 443 	
 444 	if options.savepreprocessed:
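      		#the 8x8x8 dummy volume below presumably serves as a placeholder: one copy is written per particle
      		#index so the preprocessed stacks exist with the right number of images and can be overwritten
      		#in place, image by image, during alignment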
 445 		dummy = EMData(8,8,8)
 446 		dummy.to_one()
 447 	
 448 		preprocnameCoarse = options.input.replace('.hdf','_preprocCoarse.hdf')
 449 		if options.path not in preprocnameCoarse:
 450 			preprocnameCoarse = options.path + '/' + preprocnameCoarse
 451 			
 452 		preprocnameFine = options.input.replace('.hdf','_preprocFine.hdf')
 453 		if options.path not in preprocnameFine:
 454 			preprocnameFine = options.path + '/' + preprocnameFine
 455 
 456 		for i in range(nptcl):
 457 			dummy.write_image( preprocnameCoarse ,i)
 458 			
 459 			if options.falign and options.falign != 'None' and options.falign != 'none':
 460 				dummy.write_image( preprocnameFine ,i)
 461 				
 462 	#if options.classmx and options.groups:
 463 	#	print """ERROR: --groups is used to separate the data arbitrarily into classes.
 464 	#			It should not be provided if you supply --classmx, and vice versa."""
 465 	#	sys.exit(1)
 466 		
 467 	if nptcl<1 : 
 468 		print "(e2spt_classaverage)(main) - ERROR : at least 1 particle required in input stack"
 469 		sys.exit(1)
 470 		
 471 	if nptcl==1:
 472 		if options.iter>1 :
 473 			print "(e2spt_classaverage)(main) - Error: makes no sense to have iter>1 with one particle"
 474 			sys.exit(1)
 475 		
 476 		if options.keepsig or options.keep!=1.0 :
 477 			print "(e2spt_classaverage)(main) - Error: do not use --keepsig with one particle, also keep should be 1.0 if specified"
 478 			sys.exit(1)
 479 
 480 	'''
 481 	Initialize parallelism if being used
 482 	'''
 483 	
 484 	if options.parallel :
 485 	
 486 		if options.parallel == 'none' or options.parallel == 'None' or options.parallel == 'NONE':
 487 			options.parallel = None
 488 			etc = None
 489 		
 490 		else:
 491 			print "\n\n(e2spt_classaverage)(main) - INITIALIZING PARALLELISM!"
 492 			print "\n\n"
 493 			from EMAN2PAR import EMTaskCustomer
 494 			etc=EMTaskCustomer(options.parallel)
 495 			pclist=[options.input]
 496 		
 497 			if options.ref: 
 498 				pclist.append(options.ref)
 499 			etc.precache(pclist)
 500 	else:
 501 		etc=''
 502 
 503 	if options.inputaliparams: 
 504 		preOrientationsDict = js_open_dict(options.inputaliparams)
 505 		
 506 	resumeDict = {}
 507 	actualNumsDict = {}
 508 	actualNumsEven = []
 509 	actualNumsOdd = []
 510 	
 511 	try:
 512 		if options.resume: 
 513 			#print "The resume dict to open is", options.resume
 514 			resumeDict = js_open_dict( options.resume )
 515 		
 516 			#print "Resume dict is", resumeDict
 517 			for key in resumeDict.keys():
 518 				#print "\n\nKKKKKKey is", key
 519 			
 520 				keyint = int ( key.split('_')[-1] )
 521 				#print "\n\nkeyint is", keyint
 522 			
 523 				if keyint % 2:
 524 					actualNumsOdd.append( keyint )
 525 				else:	
 526 					actualNumsEven.append( keyint )
 527 		
 528 			actualNumsDict.update({ 0:actualNumsEven, 1:actualNumsOdd })
 529 				
 530 			#print "\nActualNumsDict is", actualNumsDict	
 531 
 532 			resumeDict.close()
 533 	except:
 534 		pass
 535 			
 536 	ncls = 1
 537 	
 538 	classmxFile = options.path + '/classmx_' + str( 0 ).zfill( len (str (options.iter))) + '.hdf'
 539 	
 540 	if not options.goldstandardoff and nptcl > 1:
 541 		ncls = 2
 542 	
  543 	#C: Read or initialize all classmx images as 2-D EMData images of ncls x N dimensions, where N is the number of particles.
 544 
 545 	refsdict = {}
 546 	
 547 	if options.classmx:
 548 		print "\n--classmx=",options.classmx
 549 		classmxFile = options.classmx
 550 		classmx = EMData.read_images( classmxFile )		# we keep the entire classification matrix in memory, since we need to update it in most cases
 551 		#ncls = int(classmx[0]["maximum"])
 552 		#ncls = int( classmx[0]['nx'] )					#In this program, this will always be 1 for non-goldstandard and 2 for gold standard
 553 		
 554 		scoresImg = classmx[0]
 555 		
 556 		if options.inputaliparams:
 557 			inputalidict = js_open_dict( options.inputaliparams )
 558 			numdigits = len( inputalidict.keys()[0].split('_')[-1] )	#keys are of the form 'subtomo_XXX'; we determine how many XXX the keys have
 559 			
 560 			
 561 			
 562 		for ic in range(ncls):
 563 			resultsforaverage = {}
 564 	
 565 			#ptclnums = classmx_ptcls( classmx, ic )
 566 			
 567 			
 568 			#print "x dimension of classmx file is",  scoresImg['nx']
 569 			#ptclnums = []
 570 			#for j in range( scoresImg['nx'] ):				#Loop over the number of classes
 571 			ptclnums=[]									#each of which starts with no particles in it
 572 			
 573 			ptclrange = scoresImg['ny']
 574 			if options.subset:
 575 				ptclrange = options.subset
 576 				
 577 			for i in range( ptclrange ):			#Loop over the number of particles
 578 				score = scoresImg.get_value_at(ic,i)
 579 				print "score in classmx is", score
 580 				if score:
 581 					print "therefore score is TRUE (should not be if it is 0.0!!!!)"
 582 					ptclnums.append(i)
 583 					
 584 			
 585 			for indx in ptclnums:
 586 				
 587 				score = classmx[0].get_value_at(ic,indx)
 588 				
 589 				if float( score ) > -0.000000001:
 590 					print "\nscore in classmx was empty (positive or zero)"
 591 					
 592 					if options.recompute:
 593 						if not options.inputaliparams:
 594 							if options.keepsig or options.keep < 1.0:
  595 								print "\n(e2spt_classaverage)(main) ERROR: if --recompute and --keep (or --keepsig) are provided simultaneously, you must also provide --inputaliparams"
 596 								sys.exit()
 597 							
 598 					if options.inputaliparams:
 599 						numtag = str( indx ).zfill( numdigits )
 600 						key = 'subtomo_' + numtag
 601 						score = inputalidict[ key ][1]
 602 						print "\nfor subtomo %d, getting score from --inputaliparams, score=%f" %( indx, score )
 603 					
 604 				#classnum = classmx[0][indx]
 605 				'''
 606 				weight = classmx[1][indx]
 607 				tx = classmx[2][indx]
 608 				ty = classmx[3][indx]
 609 				tz = classmx[4][indx]
 610 				az = classmx[5][indx]
 611 				alt = classmx[6][indx]
 612 				phi = classmx[7][indx]
 613 				scale = classmx[8][indx]
 614 				#score = classmx[9][indx]
 615 				'''
 616 				
 617 				weight = classmx[1].get_value_at(ic,indx)
 618 				tx = classmx[2].get_value_at(ic,indx)
 619 				ty = classmx[3].get_value_at(ic,indx)
 620 				tz = classmx[4].get_value_at(ic,indx)
 621 				az = classmx[5].get_value_at(ic,indx)
 622 				alt = classmx[6].get_value_at(ic,indx)
 623 				phi = classmx[7].get_value_at(ic,indx)
 624 				scale = classmx[8].get_value_at(ic,indx)
 625 				
 626 				alitransform = Transform({'type':'eman','az':az,'alt':alt,'phi':phi,'tx':tx,'ty':ty,'tz':tz})
 627 				
 628 				if options.inputaliparams:
 629 					key = 'subtomo_' + numtag
 630 					transformjson = inputalidict[ key ][0]
 631 					
 632 					print "\ntransform classmx is", alitransform
 633 					print "\nwhereas transform json is", transformjson
 634 				
 635 				resultsforaverage.update( { indx:[score,alitransform] } )
 636 			
 637 			print "\nic is", ic
 638 			if ic == 0:
 639 				print "\nresults for average EVEN are", resultsforaverage
 640 				
 641 			if ic == 1:
 642 				print "\nresults for average ODD are", resultsforaverage
 643 			
 644 			ret = makeAverage( options, ic, resultsforaverage, 0 )
 645 			ref = ret[0]
 646 			#weights = ret[1]
 647 			
 648 			refsdict.update({ ic : ref })	
 649 			
 650 			if options.recompute:
 651 				if ic == 0:
 652 					ref.write_image( options.path + '/recomp_avg_even.hdf', 0 )
 653 				elif ic == 1:
 654 					ref.write_image( options.path + '/recomp_avg_odd.hdf', 0 )
 655 			else:
 656 				if ic == 0:
 657 					ref.write_image( options.path + '/avgs_even.hdf', 0 )
 658 				elif ic == 1:
 659 					ref.write_image( options.path + '/avgs_odd.hdf', 0 )
 660 			
 661 		#try:
 662 		if refsdict:
 663 			refeven = refsdict[0]
 664 			refodd = refsdict[1]
 665 		
 666 			fscfile = options.path + '/recompute_fsc.txt'
 667 			
 668 			
 669 			
 670 			print "options.align is", options.align
  671 			refsavg = compareEvenOdd( options, refeven, refodd, 2, etc, fscfile, 'initial' ) #We pass an "iteration number" > 1 (2 here) just so that both initial references are preprocessed and treated equally (this is required since --refpreprocess is off when --ref is supplied)
 672 			
 673 			if options.recompute:
 674 				refsavg.write_image( options.path + '/recomp_avg.hdf', 0 )
 675 			
 676 		else:
 677 			print "\nERROR: building initial references from classmx failed!"
 678 
 679 		
 680 		if options.inputaliparams:
 681 			inputalidict.close()
 682 		
 683 		if options.recompute:
 684 			print "recompute finished"
 685 			sys.exit()	
 686 	else:
 687 		options.classmx = classmxFile
 688 			
 689 		#C: classmx images are put into a single stack of 2-D images with 9 images in it (0-8)
 690 		#C: and written out so that the file exists when accessed later by the code
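      		#C: per the writes below: image 0 holds per-particle scores, image 1 weights, images 2-4 the
      		#C: x/y/z translations, images 5-7 the az/alt/phi Euler angles, and image 8 the scale factors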
 691 			
 692 		classmxScores = EMData(ncls,nptcl)
 693 		classmxWeights = EMData(ncls,nptcl)
 694 		classmxXs = EMData(ncls,nptcl)
 695 		classmxYs = EMData(ncls,nptcl)
 696 		classmxZs = EMData(ncls,nptcl)
 697 		classmxAzs = EMData(ncls,nptcl)
 698 		classmxAlts = EMData(ncls,nptcl)
 699 		classmxPhis = EMData(ncls,nptcl)
 700 		classmxScales = EMData(ncls,nptcl)
 701 
 702 		classmxScores.to_zero()
 703 		
 704 		if not options.goldstandardoff and ncls > 1 and nptcl > 1:
 705 			for i in range(nptcl):
 706 				klassid = 0
 707 				if i % 2:
 708 					klassid = 1
 709 					#print "\n(e2spt_classaverage)(main) - Particle %d will belong to odd class" %( i )
 710 					#classmxScores.set_value_at( 1, i, 1.0 )					
 711 				
 712 				print "\n(e2spt_classaverage)(main) - Particle %d will belong to classid %d" %( i, klassid )
 713 				classmxScores.set_value_at( klassid, i, 1.0 )
 714 					
 715 		else:
 716 			classmxScores.to_one() 		#If there's only one class, all particles are in it
 717 		
 718 		classmxWeights.to_one() 	#Particles contribute entirely and equally to the class to which they are assigned
 719 		classmxXs.to_zero()
 720 		classmxYs.to_zero()
 721 		classmxZs.to_zero()
 722 		classmxAzs.to_zero()
 723 		classmxAlts.to_zero()
 724 		classmxPhis.to_zero()
 725 		classmxScales.to_one()		#One means no scaling
 726 	
 727 		classmxScores.write_image(classmxFile,0)
 728 		classmxWeights.write_image(classmxFile,1)
 729 		classmxXs.write_image(classmxFile,2)
 730 		classmxYs.write_image(classmxFile,3)
 731 		classmxZs.write_image(classmxFile,4)
 732 		classmxAzs.write_image(classmxFile,5)
 733 		classmxAlts.write_image(classmxFile,6)
 734 		classmxPhis.write_image(classmxFile,7)	
 735 		classmxScales.write_image(classmxFile,8)
 736 			
 737 	print "\n(e2spt_classaverage)(main) - classmx files initialized."
 738 			
 739 	'''		
 740 	Figure out from the classmx file how many classes there are, and what particle indexes are in each
 741 	'''	
 742 	
 743 	ptclnumsdict = {}			#Each class might have a different list of particle numbers. 
 744 								#We store them in a master dictionary, read only once.
 745 	if ncls == 1: 
 746 		ptclnumsdict.update({ 0 : range(nptcl) })		#If there's only one class, all indexes from 0 to nptcl will be in it
 747 	elif ncls > 1:
 748 		classmx = EMData.read_images( options.classmx )
 749 		if options.verbose:
 750 			#print "\n(e2spt_classaverage)(main) - Read classmx file and its type are", options.classmx, type(classmx)
 751 			pass
 752 			
 753 		#ptclnums = classmx_ptcls(classmx,ic)				# This gets the list from the classmx
 754 
 755 		scoresImg = classmx[0]
 756 		#print "x dimension of classmx file is",  scoresImg['nx']
 757 		for j in range( scoresImg['nx'] ):				#Loop over the number of classes
 758 			ptclnums=[]									#each of which starts with no particles in it
 759 			for i in range( scoresImg['ny'] ):			#Loop over the number of particles
 760 				score = scoresImg.get_value_at(j,i)
 761 				if score:								#If a score is non-zero for a [class,ptclindx] position, 
 762 					ptclnums.append(i)					#in scoresImg, add that ptclindx to the ptclnums list for this class
 763 					
 764 			ptclnumsdict.update({ j : ptclnums })
 765 
 766 	
 767 	if not options.goldstandardoff:
 768 		print "\nThe number of classes and their indexes are"
 769 		for klass in ptclnumsdict:
 770 			print "Klass is", klass
 771 			print "Indexes are", ptclnumsdict[ klass ]
 772 	
 773 	#originalOutput = options.output
 774 	
 775 	
 776 	if not refsdict:
 777 		print "\nno classmx provided. Building initial models from scratch"
 778 		refsdict = sptRefGen( options, ptclnumsdict, cmdwp )
 779 	
 780 		if not refsdict:
 781 			print "\nERROR: failed to build initial models from scratch"
 782 			sys.exit()
 783 		
 784 	if not options.goldstandardoff and ncls > 1 and nptcl > 1:
 785 		
 786 		refeven = refsdict[0]
 787 		refodd = refsdict[1] 
 788 			
 789 		if options.filterbyfsc:
 790 			fscfile = options.path + '/initialrefs_fsc.txt'
 791 			
 792 			if not options.ref:	
  793 				refsavg = compareEvenOdd( options, refeven, refodd, 2, etc, fscfile, 'initial' ) #We pass an "iteration number" > 1 (2 here) just so that both initial references are preprocessed and treated equally (this is required since --refpreprocess is off when --ref is supplied)
 794 				
 795 				if options.savesteps:
 796 					refname = options.path + '/initialrefs_avg.hdf'
 797 					refsavg.write_image(refname, 0)
 798 				
 799 			else:
 800 				calcFsc( options, refeven, refodd, fscfile )
 801 			
 802 			print "options.lowpass and type were", options.lowpass, type( options.lowpass )
 803 			options.lowpass = ('filter.wiener.byfsc',{ 'fscfile':fscfile })
 804 			print "Now it is", options.lowpass, type(options.lowpass)
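      			#from here on --lowpass is an FSC-based Wiener filter; per the --filterbyfsc help above, the
      			#even/odd FSC stored in fscfile is presumably used to filter the data during preprocessing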
 805 
 806 	
 807 	'''
  808 	We loop over the number of iterations first (as opposed to older versions where you refined
 809 	the classes for a given number of iterations), because at the end of every iteration
 810 	the averages of the classes ought to be compared. E.g., for gold standard refinement 
 811 	purposes and dynamic filtering by FSC, there are only 2 classes (0 and 1), and you measure
 812 	the resolution by aligning the average of class 0 to the average of class 1 and computing the FSC.
 813 	This FSC is then used to filter the particles and the average during preprocessing.
 814 	'''
 815 	
 816 	#ptclshdr = EMData( options.input, 0, True )
 817 	
 818 	'''
 819 	This dictionary is used as one measure of convergence. If two consecutive averages have the same values on the header, they are equivalent and the algorithm has converged
 820 	'''
 821 	avgshdrs = { 0:[''] }
 822 	print "Upon initial setup, avgshdrs[0] is",  avgshdrs[0], type( avgshdrs[0] )
 823 	
 824 	'''
 825 	This dictionary will store the mean score per class across alignment iterations
 826 	'''
 827 	meanScores = { 0:[0] }
 828 	
 829 	if not options.goldstandardoff and ncls > 1 and nptcl > 1:
 830 		avgshdrs.update({1:['']})
 831 		meanScores.update( { 1:[0] } )
 832 		print "Upon initial setup, avgshdrs[1] is",  avgshdrs[1], type( avgshdrs[1] )
 833 	
 834 	
 835 	gc.collect()	#free up unused memory
 836 	for it in range( options.iter ):
 837 	
 838 		if options.verbose: 
 839 			print "\n(e2spt_classaverage)(main) - ###### Iteration %d/%d"%(it, options.iter)
 840 	
 841 		classmxFile = options.path + '/classmx_' + str( it ).zfill( len (str (options.iter))) + '.hdf'
 842 		
 843 		#weightsMasterDict = {}
 844 		
 845 		for ic in range( ncls ):
 846 			weights = {}
 847 			if options.verbose:
 848 				print "Processing class %d/%d"%(ic+1,ncls)
 849 
 850 			ptclnums = ptclnumsdict[ ic ]
 851 			
 852 			
 853 			
 854 			klassid = '_even'
 855 			if ic == 1:
 856 				klassid = '_odd'
 857 			if options.goldstandardoff:
 858 				klassid = ''
 859 			
 860 			
 861 			print "for klassid", klassid
 862 			print "ptclnums are", ptclnums
 863 			print "\n"
 864 				
 865 			#if options.savesteps:
 866 			#	refname = options.path + '/class_' + str(ic).zfill( len( str(ic) )) + '.hdf'
 867 			#	ref.write_image(refname,-1)
 868 
 869 			#print "\nFor class", ic
 870 			#print "Particle numbers are", ptclnums		
 871 			#print "\n"
 872 			
 873 			#options.output = originalOutput.replace('.hdf', '_' + str(ic).zfill( len (str (ncls))) + '.hdf')
 874 			#options.output = originalOutput
 875 			
 876 			#if ncls > 1:
 877 				
 878 			
 879 				#options.output = originalOutput.replace('.hdf', klassid + '.hdf')	
 880 		
 881 			#resNum = 0
 882 			resumeDict = {}
 883 		
 884 			#for it in range(options.iter):
 885 			
 886 			# In 2-D class-averaging, each alignment is fast, so we send each node a class-average to make
 887 			# in 3-D each alignment is very slow, so we use a single ptcl->ref alignment as a task
 888 
 889 			tasks=[]
 890 			results=[]
 891 			try:
 892 				ref = refsdict[ ic ]
 893 				#if options.parallel: 
 894 				ref.write_image(os.path.join(options.path,"tmpref.hdf"),0)
 895 			
 896 			except:
 897 				print "reference is empty"
 898 				print "classmx is", options.classmx
 899 				
 900 				sys.exit()
 901 			
 902 			# ALL CLIPPING and preprocessing should happen inside the preprocessing function
 903 			
 904 			'''
 905 			Code to 'resume' crashed jobs
 906 			'''
 907 			try:
 908 				if options.resume and actualNumsDict[ ic ]:
 909 					resumeDict = js_open_dict(options.resume)
 910 					#resNum += 1
 911 			except:
 912 				pass
 913 					
 914 			for ptclnum in ptclnums:
  915 				nptclsinklass = len( ptclnums )
 916 				
 917 				try:
  918 					if actualNumsDict[ ic ] and ptclnum in actualNumsDict[ ic ]:
  919 						print """Skipping this particle because you provided --resume and the alignment info for this particle is already present.
 920 						Info for particle loaded into results""", ptclnum
 921 					
 922 						tomoID = "subtomo_" + str(ptclnum).zfill( len(str( len(ptclnums) )) )
 923 					
 924 						if tomoID not in resumeDict.keys():
 925 							print "ERROR: This key is not in the file provided for --resume", tomoID
 926 							sys.exit() 
 927 					
 928 			
 929 						if len(resumeDict.keys()) > 0:
 930 							keys = resumeDict.keys()
 931 						
 932 							for key in keys:
 933 								if type(resumeDict[key]) is not list:					 
 934 									print """ERROR: Your sptali_ir.json file seems to be incomplete. The value for this particle's key is a Transform(), but it should be a list.
 935 									The file should contain a dictionary with one 'key' per particle, made of the word 'subtomo_' followed by the particle's index,
 936 									zero-padded to as many digits as the total number of particles has (for example,
 937 									the first particle in a set of up to 9 would be 'subtomo_0'; in a set of 10 to 99 it would be 'subtomo_00'; and in a set of 100 to 999
 938 									it would be 'subtomo_000'). The 'value' of each key should be a list with two elements, [ Transform(), score ], where Transform()
 939 									holds the alignment parameters between the particle and the reference, and score is the cross-correlation score for that alignment."""
 940 									sys.exit()
 941 							
 942 						results.append( [ {'xform.align3d': resumeDict[tomoID][0] , 'score':resumeDict[tomoID][1] } ] )
 943 				except:
 944 					pass	
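				#Illustrative sketch of the key convention used in the sptali_ir*.json files
				#(mirrors the zfill() calls above and below; the numbers are hypothetical):
				#	width = len( str( nptcl ) )						# e.g. nptcl = 150 -> width = 3
				#	key = 'subtomo_' + str( ptclnum ).zfill( width )			# e.g. ptclnum = 7 -> 'subtomo_007'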
 945 					
 946 				transform = None					
 947 				if options.inputaliparams:
 948 					tomoID = "subtomo_" + str(ptclnum).zfill( len(str( len(ptclnums) )) )
 949 					transform = preOrientationsDict[tomoID][0]
 950 					
 951 					print transform
 952 					print "Of type", type(transform)
 953 			
 954 				if options.parallel:
 955 					task=Align3DTask(["cache",os.path.join(options.path,"tmpref.hdf"),0],["cache",options.input,ptclnum],ptclnum,"Ptcl %d in iter %d"%(ptclnum,it),options,transform,it)
 956 					tasks.append(task)
 957 				else:
 958 					#print "No parallelism specified"
 959 					result=align3Dfunc(["cache",os.path.join(options.path,"tmpref.hdf"),0],["cache",options.input,ptclnum],ptclnum,"Ptcl %d in iter %d"%(ptclnum,it),options,transform,it)
 960 					results.append(result['final'])
 961 			
 962 			# start the alignments running
 963 			if options.parallel:
 964 				tids=etc.send_tasks(tasks)
 965 				if options.verbose: 
 966 					print "%d tasks queued in class %d iteration %d"%(len(tids),ic,it) 
 967 
 968 				"""Wait for alignments to finish and get results"""
 969 				#results=get_results(etc,tids,options.verbose,jsA,len(ptclnums),1)
 970 				#results=get_results(etc,tids,options.verbose,jsA, nptcl ,1)
 971 				#def get_results(etc,tids,verbose,nptcls,ref=''):
 972 				
 973 				results = filter( None, get_results(etc,tids,options.verbose, nptcl ) )
 974 				
 975 				#results = get_results(etc,tids,options.verbose, nptcl )
 976 				
 977 				
 978 				
 979 			if options.verbose > 2: 
 980 				print "Results:" 
 981 				pprint(results)
 982 						
 983 			if not options.donotaverage:					
 984 				#ref = make_average(options,ic,options.input,options.path,results,options.averager,options.saveali,options.saveallalign,options.keep,options.keepsig,options.sym,options.groups,options.breaksym,options.nocenterofmass,options.verbose,it)
 985 				ref=''
 986 				
 987 				'''
 988 				If there's more than one particle, the reference for the next round will
 989 				be the average of the aligned particles
 990 				'''
 991 				if nptcl > 1:
 992 				
 993 					resultsforaverage = {}
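					#Each element of 'results' is assumed (from how it is unpacked here and below) to
					#have the form [ [ {'score':s, 'xform.align3d':T, ...} ], ptclindx ], where T is a
					#Transform() and s the alignment score; a third element with the coarse-alignment
					#peaks is present when --saveallpeaks is used.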
 994 					for r in results:
 995 						ptclindx = r[1]
 996 						score = r[0][0]["score"]
 997 						alitransform = r[0][0]["xform.align3d"]
 998 						resultsforaverage.update( { ptclindx:[score,alitransform] } )
 999 					
1000 				
1001 					ret = makeAverage(options, ic,resultsforaverage,it)
1002 					ref = ret[0]
1003 					weights = ret[1]
1004 					
1005 					#weightsid = 'even'
1006 					#if ic == 1:
1007 					#	weightsid = 'odd'
1008 					#if options.goldstandardoff:
1009 					#	klassid = 'all'
1010 					
1011 					#weightsMasterDict.update({ weightsid: weights })
1012 					
1013 				else:
1014 					'''
1015 					If there's only one particle, the "reference" for the next round will
1016 					be the one aligned particle. However, since it makes no sense to have
1017 					more iterations, the program will be stopped shortly, after the
1018 					aligned particle is written out
1019 					'''
1020 					ref = EMData( options.input, 0 )
1021 					
1022 					print "results len", len(results)
1023 					print "results[0]", results[0]
1024 					print "results", results
1025 					try:
1026 						ref.process_inplace("xform",{"transform":results[0][0][0]["xform.align3d"]})
1027 					
1028 					except:
1029 						ref.process_inplace("xform",{"transform":results[0][0]["xform.align3d"]})
1030 
1031 					ref['origin_x'] = 0
1032 					ref['origin_y'] = 0
1033 					ref['origin_z'] = 0
1034 					
1035 					if options.postprocess:
1036 						ppref = ref.copy()
1037 						maskPP = "mask.sharp:outer_radius=-2"
1038 						maskPP=parsemodopt(maskPP)
1039 
1040 						ppref = postprocess(ppref,maskPP,options.normproc,options.postprocess)
1041 
1042 						refnamePP = refname.replace('.hdf', '_postproc.hdf')
1043 				
1044 						#ppref.write_image("%s/class_%02d.hdf"%(options.path,ic),it)
1045 						ppref.write_image(refnamePP,it)
1046 					
1047 					outname = options.path + '/final_avg.hdf'
1048 					#if options.output:
1049 					#	outname = options.path + '/' + options.output
1050 					
1051 					ref.write_image( outname , 0)
 1052 					print "Done aligning the only particle in --input to --ref"
1053 					sys.exit()
1054 			
1055 				ref['xform.align3d']=Transform()
1056 				ref['origin_x'] = 0
1057 				ref['origin_y'] = 0
1058 				ref['origin_z'] = 0
1059 				
1060 				refsdict.update( { ic:ref } )
1061 
1062 				if options.savesteps:
1063 				
1064 					#refname = options.path + '/class_' + str(ic).zfill( len( str(ic) )) + '.hdf'
1065 					refname = options.path + '/avgs' + klassid + '.hdf'
1066 				
1067 						
1068 					ref.write_image( refname, it )
1069 				
1070 					#print "\n\nFor the final avg going into class_x.hdf, ali params are", ref['xform.align3d']
1071 				
1072 					if options.postprocess:
1073 						ppref = ref.copy()
1074 						maskPP = "mask.sharp:outer_radius=-2"
1075 						maskPP=parsemodopt(maskPP)
1076 	
1077 						ppref = postprocess(ppref,maskPP,options.normproc,options.postprocess)
1078 
1079 						refnamePP = refname.replace('.hdf', '_postproc.hdf')
1080 					
1081 						#ppref.write_image("%s/class_%02d.hdf"%(options.path,ic),it)
1082 						ppref.write_image(refnamePP,it)
1083 				
1084 				if it == options.iter -1:
1085 				
1086 					outname = options.path + '/final_avg.hdf'
1087 					#if options.output:
1088 					#	outname = options.path + '/' + options.output
1089 				
1090 					ref.write_image( outname , 0 )			
1091 			elif options.donotaverage: 				#This is used with e2spt_multirefine, where all you need is the alignment info, but not the averages
1092 				for p in range( nptcl ):
1093 					weights.update( {p:1.0} )
1094 				
1095 				
1096 				
1097 			'''
1098 			Define and open the .json dictionaries where alignment and score values will be stored, for each iteration,
1099 			and for each reference if using multiple model refinement
1100 			'''
1101 			
1102 			jsAliParamsPath = options.path + '/sptali_ir.json'
1103 			
1104 			
1105 			print "options.refinemultireftag and type are", options.refinemultireftag, type(options.refinemultireftag)
1106 			
1107 			if not options.refinemultireftag:
1108 				jsAliParamsPath = jsAliParamsPath.replace('.json', '_' + str(it).zfill( len(str(options.iter))) + '.json')
1109 				
1110 				
1111 
1112 			print "(e2spt_classaverage) This is the .json file to write", jsAliParamsPath
1113 			jsA = js_open_dict( jsAliParamsPath ) #Write particle orientations to json database.
1114 			
1115 			iii = 0
1116 			
1117 			classScoresList = [] 
1118 			
1119 			'''
1120 			Iterate over alignment results to write them to classmx.hdf and .json files
1121 			'''
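			#For reference, the classmx_*.hdf stack written at the end of this loop holds nine images,
			#in this order: scores, weights, tx, ty, tz, az, alt, phi, scale (one value per
			#class/particle pair); see the classmx*.write_image() calls below.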
1122 			print "len results is", len(results)
1123 			print "should match nptcl", nptcl
1124 			
1125 			print "results are", results
1126 			
1127 			for r in results:
1128 				#if r and r[0]:	
1129 				
1130 				
1131 				ptclindx = r[1]
1132 					
1133 				score = r[0][0]['score']
1134 				if options.verbose > 3:
1135 					print "for particle %d score is %.4f" %(ptclindx,score)
1136 				
1137 				#classmxScores.set_value_at(ic,iii,score)
1138 				#posscore = math.fabs(score)
1139 				
1140 				classScoresList.append(score)
1141 				
1142 				#print "\n\n\n\n\n\n\n\nThe appended positive score is", posscore
1143 				#weight=1.0
1144 				
1145 				if options.verbose > 3:
1146 					print "classmxWeights img is", classmxWeights['nx'], classmxWeights['ny'], classmxWeights['nz']
1147 					
1148 				print "weights is", weights
 1149 				print "going to set value for classmxweights at", ic, ptclindx, weights[ptclindx]
1150 				classmxWeights.set_value_at(ic,ptclindx,weights[ptclindx])
1151 				
1152 				print "set value for classmxweights at", ic, ptclindx, weights[ptclindx]
1153 				
1154 				t = r[0][0]['xform.align3d']
1155 				
1156 				if options.verbose > 3:
1157 					print "and transform is",t
1158 			
1159 				xformslabel = 'subtomo_' + str( ptclindx ).zfill( len( str( nptcl ) ) )			
1160 				jsA.setval( xformslabel, [ t , score ] )
1161 			
1162 				if options.verbose > 3:
1163 					print "wrote info to .json file"
1164 				
1165 				if options.saveallpeaks and options.npeakstorefine > 1 and options.falign:
1166 					scoresCoarse = []
1167 					
1168 					rCoarse = r[2]
1169 					
1170 					for peak in rCoarse:
1171 						sc = peak['score']
1172 						
1173 						if float( sc ) < 0.0:
1174 							scoresCoarse.append( sc )
1175 						
1176 					meanPeaksScoresCoarse = numpy.mean( scoresCoarse )
1177 					
1178 					#sigmaPeaksScoresCoarse = 0.0
1179 					#if len( scoresCoarse ) > 1:
1180 					
1181 					sigmaPeaksScoresCoarse = numpy.std( scoresCoarse )
1182 					
1183 					
1184 					if len(scoresCoarse) > 1:
1185 					
1186 					
1187 						jsAliParamsPathCoarseAll = options.path + '/sptali_ir_coarse_allpeaks.json'
1188 	
1189 						if not options.refinemultireftag:
1190 							jsAliParamsPathCoarseAll = jsAliParamsPathCoarseAll.replace('.json', '_' + str(it).zfill( len(str(options.iter))) + '.json')
1191 	
1192 						jsAcoarseAll = js_open_dict( jsAliParamsPathCoarseAll )
1193 						
1194 						pi = 0
1195 						
1196 						for peak in rCoarse:
1197 							tc = peak['xform.align3d']
1198 							sc = peak['score']
1199 						
1200 							if float( sc ) < 0.0:
1201 								z = ( sc - meanPeaksScoresCoarse ) / sigmaPeaksScoresCoarse
1202 						
1203 								print "pi, sc, meanPeaksScoresCoarse, sigmaPeaksScoresCoarse, z", pi, sc, meanPeaksScoresCoarse, sigmaPeaksScoresCoarse, z
1204 						
1205 								peaklabel = str( pi ).zfill( len( str(options.npeakstorefine) ) )
1206 						
1207 								jsAcoarseAll.setval( xformslabel + '_peak_' + peaklabel , [ tc , 'ccc='+str(sc), 'zscore='+str(z) ] )
1208 							else:
1209 								print "Empty score for peak", pi
1210 						
1211 							pi += 1	
1212 						
1213 						jsAcoarseAll.close()
1214 					
1215 					else:
1216 						print "WARNING: Not enough successful alignments to compute a z-score"
1217 				else:
1218 					pass #Only one peak from --npeakstorefine was refined
1219 						
1220 				
1221 				if options.verbose > 3:
1222 					print "setting classmx"
1223 						
1224 				trans=t.get_trans()
1225 				print "\n\n\nTranslations were", trans
 1226 				print "Therefore the transform was", t
1227 				rots=t.get_rotation()
1228 			
1229 				tx=trans[0]
1230 				print "Translation in x was", tx
1231 				classmxXs.set_value_at(ic,ptclindx,tx)
1232 			
1233 				ty=trans[1]
1234 				print "Translation in y was", ty
1235 				classmxYs.set_value_at(ic,ptclindx,ty)
1236 			
1237 				tz=trans[2]
1238 				print "Translation in z was", tz
1239 				classmxZs.set_value_at(ic,ptclindx,tz)
1240 			
1241 				az=rots['az']
1242 				classmxAzs.set_value_at(ic,ptclindx,az)
1243 			
1244 				alt=rots['alt']
1245 				classmxAlts.set_value_at(ic,ptclindx,alt)
1246 			
1247 				phi=rots['phi']
1248 				classmxPhis.set_value_at(ic,ptclindx,phi)
1249 			
1250 				scale=1.0
1251 				classmxScales.set_value_at(ic,ptclindx,scale)
1252 			#else:
1253 			#	print "Warning!! Aberrant results r[0]", r[0]
1254 			#	print "and r", r
1255 			#	sys.exit()
1256 			
1257 			iii+=1
1258 				
1259 										
1260 			jsA.close()
1261 			print "closed .json file"
1262 			
1263 			
1264 			
1265 			classmxScores.write_image(classmxFile,0)
1266 			classmxWeights.write_image(classmxFile,1)
1267 			classmxXs.write_image(classmxFile,2)
1268 			classmxYs.write_image(classmxFile,3)
1269 			classmxZs.write_image(classmxFile,4)
1270 			classmxAzs.write_image(classmxFile,5)
1271 			classmxAlts.write_image(classmxFile,6)
1272 			classmxPhis.write_image(classmxFile,7)	
1273 			classmxScales.write_image(classmxFile,8)
1274 		
1275 			meanScore = sum( classScoresList ) / len( classScoresList )
1276 			
1277 			if it == 0:
1278 				meanScores[ic] = [ meanScore ]
1279 			else:
1280 				meanScores[ic].append( meanScore )
1281 			
1282 			
1283 			#classScoresList.reverse()
1284 			maxY = max(classScoresList) + 1
1285 		
1286 			plotX = range( len(classScoresList) )
1287 			maxX = max(plotX) + 1
1288 		
1289 			plotName = 'spt_cccs_' + str( it ).zfill( len( str( options.iter ) )) + klassid + '.png'
1290 			
1291 			classScoresList2plot = list(classScoresList)
1292 			classScoresList2plot.sort()
1293 			classScoresList2plot.reverse()
1294 			
1295 			txtname = plotName.replace('.png','.txt')
1296 			textwriter(classScoresList2plot,options,txtname, invert=1)
1297 				
1298 			if options.plots:
1299 				from e2spt_hac import plotter
1300 			
1301 				plotter(plotX, classScoresList2plot, options, plotName, maxX, maxY, 1, sort=1)
1302 			
1303 			gc.collect()	 #free up unused memory
1304 			
1305 			
1306 		
1307 		'''
1308 		Compare even and odd averages if the user did not provide --goldstandardoff
1309 		'''
1310 		if not options.goldstandardoff and len( refsdict ) > 1 and ncls > 1:
1311 			avgeven = refsdict[0]
1312 			avgodd = refsdict[1] 
1313 			
1314 			print "goldstandardoff is", options.goldstandardoff
1315 			print "avgshdrs[0] is and type", avgshdrs[0], type(avgshdrs[0])
1316 			print "avgshdrs[1] is and type", avgshdrs[1], type(avgshdrs[1])
1317 			
1318 			#print "avgeven is", avgeven
1319 			#print "avgodd is", avgodd
1320 			
1321 			avgshdrs[0].append( avgeven.get_attr_dict() )
1322 			avgshdrs[1].append( avgodd.get_attr_dict() )
1323 			
1324 			#avgshdrs.update( { 0:avgshdrs[0].append( avgeven.get_attr_dict() ), 1:avgshdrs[1].append( avgodd.get_attr_dict() ) } )
1325 			
1326 			if it > 0 and len(avgshdrs[0]) > 1 and len(avgshdrs[1]) > 1:
 1327 				if avgshdrs[0][-1]['mean'] == avgshdrs[0][-2]['mean'] and avgshdrs[1][-1]['mean'] == avgshdrs[1][-2]['mean']:
1328 					print "Both independent averages have converged!"
1329 					#os.system('rm ' + options.path + '/tmpref.hdf')
1330 					os.remove( options.path + '/tmpref.hdf' )
1331 					sys.exit()
1332 				
1333 			fscfile = options.path + '/fsc_' + str(it).zfill( len( str(options.iter))) + '.txt'
1334 			
1335 			final_avg_ali2ref = compareEvenOdd(options, avgeven, avgodd, it, etc, fscfile, 'goldstandard'  )
1336 			
1337 			
1338 			if options.savesteps:
1339 				final_avg_ali2ref.write_image( options.path + '/avgs_ali2ref.hdf' , it)
1340 	
1341 			if it == options.iter -1 :
1342 			
1343 				outname = options.path + '/final_avg_ali2ref.hdf'
1344 				#if options.output:
1345 				#	outname = options.path + '/' + options.output
1346 					
1347 				final_avg_ali2ref.write_image( outname , 0)
1348 			
1349 			
1350 			
1351 			try:
1352 				if options.resume and actualNums:
1353 					resumeDict.close()
1354 			except:	
1355 				pass
1356 						
 1357 			actualNums = [] 		#Reset this so that when --resume is provided the incomplete .json file is 'fixed' using the info in actualNums only once
1358 
1359 		
1360 		elif options.goldstandardoff and options.ref:
1361 			avg = refsdict[0]
1362 			
1363 			print "avgshdrs[0] is and type", avgshdrs[0], type(avgshdrs[0])
1364 			avgshdrs[0].append( avg.get_attr_dict() )
1365 			
1366 			#avgshdrs.update( { 0:avgshdrs[0].append( avg.get_attr_dict() ) } )
1367 			
1368 			if it > 0 and len(avgshdrs[0]) > 1 :
1369 				if avgshdrs[0][-1]['mean'] == avgshdrs[0][-2]['mean']:
1370 					print "The average has converged!"
1371 					outname = options.path + '/final_avg.hdf'
1372 					avg.write_image( outname , 0)
1373 					#os.system('rm ' + options.path + '/tmpref.hdf')
1374 					os.remove( options.path + '/tmpref.hdf' )
1375 					sys.exit()
1376 
1377 			originalref = EMData( options.ref, 0 )
1378 			
1379 			fscfile = options.path + '/fsc_' + str(it).zfill( len( str(options.iter))) + '.txt'
1380 
1381 			#calcFsc( originalref, avg, fscfile )
1382 			
 1383 			final_avg_ali2ref = compareEvenOdd( options, originalref, avg, it, etc, fscfile, 'refbased', average=False )	#compareEvenOdd internally uses an "iteration number" of 2 so that both volumes are preprocessed and treated equally (required since reference preprocessing is off when --ref is supplied); average=False prevents the reference and the average from being averaged together
1384 			
1385 			if not options.sym:
1386 				if options.savesteps:
1387 					final_avg_ali2ref.write_image( options.path + '/avgs_ali2ref.hdf' , it)
1388 	
1389 				if it == options.iter -1 :
1390 			
1391 					outname = options.path + '/final_avg_ali2ref.hdf'
1392 					final_avg_ali2ref.write_image( outname , 0)
1393 			
1394 			
1395 			if options.filterbyfsc:
1396 				print "Options.lowpass was and of type", options.lowpass, type( options.lowpass )
1397 				options.lowpass = ('filter.wiener.byfsc',{ 'fscfile':fscfile })
1398 				print "Now it is", options.lowpass, type(options.lowpass)
1399 				
1400 		ic+=1	
1401 		#import matplotlib as plt
1402 		
1403 	for key in meanScores:
1404 		scores = meanScores[ key ]
1405 		klassid = '_even'
1406 		if key == 1:
1407 			klassid = '_odd'
1408 		if options.goldstandardoff:
1409 			klassid = ''
1410 	
1411 		maxY = max(scores) + 1
1412 		
1413 		plotX = range( len(scores) )
1414 		maxX = max(plotX) + 1
1415 	
1416 		plotName = 'spt_meanccc' + klassid + '.png'
1417 		
1418 		scores2plot = list(scores)
1419 		scores2plot.sort()
1420 		scores2plot.reverse()
1421 		
1422 		txtname = plotName.replace('.png','.txt')
1423 		textwriter( scores2plot ,options, txtname, invert=1 )
1424 	
1425 	if options.plots:
1426 		from e2spt_hac import plotter
1427 	
1428 		plotter(plotX, scores2plot, options, plotName, maxX, maxY, 1, sort=0)
1429 		
1430 		
1431 	if options.inputaliparams: 
1432 		preOrientationsDict.close()
1433 	
1434 	#os.system('rm ' + options.path + '/tmpref.hdf')
1435 	os.remove( options.path + '/tmpref.hdf' )
1436 	
1437 	print "Logger ending"	
1438 	E2end(logger)
1439 	
1440 	print "logger ended"
1441 	sys.stdout.flush()
1442 	
1443 	
1444 	
1445 	return
1446 
1447 
1448 '''
1449 This function tries to alleviate the issue of providing parameters both through the aligner,
1450 and outside of it. For example, --sym and --search, vs --align:whatever_alinger:sym=xxx:search=xxx.
1451 Command line options should take precedence.
1452 '''
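#Illustrative example of the precedence rule (hypothetical values): running with
#	--align=rotate_translate_3d:search=10 --search=12
#results in the aligner string being rewritten below so that search=12 is what is actually used.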
1453 def sptParseAligner( options ):
1454 	
1455 	if options.align and 'rotate' in options.align:
1456 		#print "There's options.align", options.align
 1457 		if options.sym and options.sym != 'c1' and options.sym != 'C1' and 'sym' not in options.align and 'grid' not in options.align:
1458 			if 'rotate_translate_3d' in options.align or 'rotate_symmetry_3d' in options.align:
1459 				options.align += ':sym=' + str( options.sym )
1460 			#print "And there's sym", options.sym
1461 			
1462 		if 'search' not in options.align:
1463 			if 'rotate_translate_3d' in options.align and options.search and 'tree' not in options.align:	
1464 				options.align += ':search=' + str( options.search )
1465 			
1466 		elif 'rotate_translate_3d' in options.align and 'tree' not in options.align:
1467 			searchA = options.align.split('search=')[-1].split(':')[0]
1468 			searchdefault = 8
1469 			
1470 			if options.search != searchdefault:
1471 						
1472 				prefix = options.align.split('search=')[0]
1473 				trail = options.align.split('search=')[-1].split(':')[-1]
1474 			
1475 				options.align =  prefix + 'search=' + str(options.search)
1476 				if len(trail) > 2 and '=' in trail:
1477 					options.align += ':' + trail 
1478 			
1479 				print """\nWARNING: --search is different from search= provided through
1480 				--align or its default value of 8. There's no need to specify both, but 
1481 				if you did, --search takes precedence :-) ."""
1482 				#sys.exit()
1483 			elif options.search == searchdefault:
1484 				options.search = searchA
1485 			
1486 
1487 		if "rotate_translate_3d_grid" in options.align:
 1488 			if "alt0" in options.align and "alt1" in options.align:
1489 				alt0 = int(options.align.split('alt0')[-1].split(':')[0].replace('=',''))	
1490 				alt1 = int(options.align.split('alt1')[-1].split(':')[0].replace('=',''))
1491 				
1492 				print "alt0 and alt1 are", alt0,alt1, type(alt0), type(alt1)
1493 				print alt1-alt0 == 0
1494 				#sys.exit()
1495 				
1496 				if alt1-alt0 == 0:
1497 					print """\nERROR: alt0 and alt1 cannot be equal for rotate_translate_3d_grid.
 1498 					If you want to deactivate the search over this angle, provide alt0 and alt1 values
1499 					such that alt1-alt0 is NOT ZERO, and provide a step size for dalt that is larger
1500 					than this difference. For example: 
1501 					alt0=0:alt1=1:dalt=2."""
1502 					sys.exit()
1503 					
 1504 			if "phi0" in options.align and "phi1" in options.align:
1505 				phi0 = int(options.align.split('phi0')[-1].split(':')[0].replace('=',''))	
1506 				phi1 = int(options.align.split('phi1')[-1].split(':')[0].replace('=',''))
1507 				
1508 				print "phi0 and phi1 are", phi0,phi1, type(phi0), type(phi1)
1509 				print phi1-phi0 == 0
1510 				#sys.exit()
1511 				
1512 				if phi1-phi0 == 0:
1513 					print """\nERROR: phi0 and phi1 cannot be equal for rotate_translate_3d_grid.
 1514 					If you want to deactivate the search over this angle, provide phi0 and phi1 values
1515 					such that phi1-phi0 is NOT ZERO, and provide a step size for dphi that is larger
1516 					than this difference. For example: 
1517 					phi0=0:phi1=1:dphi=2."""
1518 					sys.exit()
1519 					
 1520 			if "az0" in options.align and "az1" in options.align:
1521 				az0 = int(options.align.split('az0')[-1].split(':')[0].replace('=',''))	
1522 				az1 = int(options.align.split('az1')[-1].split(':')[0].replace('=',''))
1523 				
1524 				print "az0 and az1 are", az0,az1, type(az0), type(az1)
1525 				print az1-az0 == 0
1526 				#sys.exit()
1527 				
1528 				if az1-az0 == 0:
1529 					print """\nERROR: az0 and az1 cannot be equal for rotate_translate_3d_grid.
 1530 					If you want to deactivate the search over this angle, provide az0 and az1 values
1531 					such that az1-az0 is NOT ZERO, and provide a step size for daz that is larger
1532 					than this difference. For example: 
1533 					az0=0:az1=1:daz=2."""
1534 					sys.exit()
1535 			
1536 	print "\n\nBefore adding and fixing searches, options.falign is", options.falign, type(options.falign)	
1537 	
1538 	if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none':
1539 		if 'search' not in options.falign and 'refine_3d_grid' in options.falign:		
1540 			options.falign += ':search=' + str( options.searchfine )
1541 			
1542 		else:
1543 			searchF = options.falign.split('search=')[-1].split(':')[0]
1544 			searchfinedefault = 2
1545 			
1546 			if options.searchfine != searchfinedefault:
1547 						
1548 				prefix = options.falign.split('search=')[0]
1549 				trail = options.falign.split('search=')[-1].split(':')[-1]
1550 				
1551 				options.falign =  prefix + 'search=' + str(options.searchfine)
1552 				
1553 				if len(trail) > 2 and '=' in trail:
1554 				
1555 					options.falign += ':' + trail 
1556 			
1557 				print """\nWARNING: --searchfine is different from search= provided through
1558 				--falign or its default value of 2. There's no need to specify both, but 
1559 				if you did, --searchfine takes precedence :-) ."""
1560 				#sys.exit()
1561 			
1562 			elif options.searchfine == searchfinedefault:
1563 				options.searchfine = searchF	
1564 	
1565 	
1566 	return options
1567 
1568 
1569 def compareEvenOdd( options, avgeven, avgodd, it, etc, fscfile, tag, average=True ):
1570 	
1571 	avgeven.process_inplace( 'normalize.edgemean' )
1572 	avgodd.process_inplace( 'normalize.edgemean' )
1573 	
1574 	#from EMAN2PAR import EMTaskCustomer
1575 	#etc=EMTaskCustomer(options.parallel)
1576 	
1577 	#tasks = []
1578 	
1579 	if not options.parallel:
1580 		print "ERROR: Parallelization cannot be turned off unless you supply --goldstandardoff as well"
1581 		sys.exit()
1582 	
1583 	#task = Align3DTask( avgeven, avgodd, 0, "avgeven(ref) vs avgodd", options, None, it, nptclsexception=1)
1584 	#tasks.append( task )
1585 
1586 	#tids = etc.send_tasks(tasks)
1587 	
1588 	#resultsAvgs = get_results( etc, tids, options.verbose, 1 )
1589 								#etc,tids,verbose,jsA,nptcls,savealiparams=0,ref=''
1590 								#def get_results(etc,tids,verbose,nptcls,ref=''):
1591 	
1592 	
1593 	if not options.sym:
1594 		resultsAvgs = align3Dfunc( avgeven, avgodd, 0, "avgeven(ref) vs avgodd", options, None, 2 )
1595 								
1596 		##print "r['final'] is", resultsAvgs['final']
1597 		##print "\nr['final'][0] is", resultsAvgs['final'][0]
1598 		#print "\nr[0][0] is", resultsAvgs[0][0]
1599 		#print "\nr[0][0[0] is", resultsAvgs[0][0][0]
1600 		##print "\nr['final'][0]['xform.align3d'] is", resultsAvgs['final'][0]["xform.align3d"]
1601 		#print "\nr[0][-1]", resultsAvgs[0][-1]
1602 	
1603 		transformAliOdd2even = resultsAvgs['final'][0]['xform.align3d']
1604 		scoreAliOdd2even = resultsAvgs['final'][0]['score']
1605 	
1606 		avgsDict = options.path + '/sptali_ir_' + str(it).zfill( len( str(options.iter))) + '_oddali2even.json'
1607 	
1608 		if not average: 
1609 			if tag == 'refbased':
 1610 				avgsDict = options.path + '/sptali_ir_' + str(it).zfill( len( str( options.iter ))) + '_avgali2ref.json'
1611 	
1612 		else:
1613 			if tag == 'initial':
1614 				avgsDict = options.path + '/sptali_ir_initialrefs_oddali2even.json'
1615 			if tag == 'recompute':
 1616 				avgsDict = options.path + '/sptali_ir_recompute_oddali2even.json'
1617 			
1618 		jsAvgs = js_open_dict( avgsDict )
1619 
1620 		xformslabel = 'subtomo_0'		
1621 	
1622 		jsAvgs.setval( xformslabel, [ transformAliOdd2even , scoreAliOdd2even ] )
1623 
1624 		jsAvgs.close()
1625 		
1626 		avgodd.transform( transformAliOdd2even )
1627 	
1628 	finalA = None
1629 	
1630 	if average:
1631 		avgr = Averagers.get( options.averager[0], options.averager[1])
1632 		avgr.add_image( avgeven )
1633 		avgr.add_image( avgodd )
1634 		finalA = avgr.finish()
1635 	
1636 		finalA['origin_x']=0
1637 		finalA['origin_y']=0		#The origin needs to be reset to ZERO to avoid display issues in Chimera
1638 		finalA['origin_z']=0
1639 		finalA['xform.align3d'] = Transform()
1640 
1641 		#apix = final_avg['apix_x']
1642 		
1643 	elif not average:
1644 		finalA = avgodd.copy()	#avgeven is the reference if --ref provided and --goldstandardoff, or the previous iteration average
1645 	
1646 	if options.sym and not options.breaksym :
1647 		finalA = finalA.process('xform.applysym',{'sym':options.sym})
1648 	
1649 	calcFsc( options, avgeven, avgodd, fscfile )
1650 	
1651 	finalA.process_inplace('normalize.edgemean')
1652 	
1653 	apix = finalA['apix_x']
1654 	nyquist = 2.0 * apix			#this is nyquist 'resolution', not frequency; it's the inverse of nyquist frequency
1655 	halfnyquist = 2.0 * nyquist
1656 	twothirdsnyquist = 3.0 * nyquist / 2.0
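	#Worked example (hypothetical apix): with apix = 4.0 A/pixel, nyquist = 8.0 A,
	#halfnyquist = 16.0 A and twothirdsnyquist = 12.0 A (all expressed as resolutions, not frequencies).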
1657 	
1658 	eventhresh = avgeven['sigma']
1659 	#eventhresh = 0
1660 	avgevenmasked = avgeven.process('mask.auto3d',{'nshells':1,'nshellsgauss':5,'radius':1,'nmaxseed':1,'threshold': eventhresh })
1661 	
1662 	oddthresh = avgodd['sigma']
1663 	#oddthresh = 0
1664 	avgoddmasked = avgodd.process('mask.auto3d',{'nshells':1,'nshellsgauss':5,'radius':1,'nmaxseed':1,'threshold': oddthresh })
1665 	
1666 	fscfilemasked = fscfile.replace('.txt','_mask_supertight.txt')
1667 	
1668 	calcFsc( options, avgevenmasked, avgoddmasked, fscfilemasked )
1669 	
1670 	avgevenmasked = avgeven.process('mask.auto3d',{'nshells':1,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': eventhresh })
1671 	avgoddmasked = avgodd.process('mask.auto3d',{'nshells':1,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': oddthresh })
1672 	
1673 	fscfilemasked2 = fscfile.replace('.txt','_mask_lesstight.txt')
1674 	calcFsc( options, avgevenmasked, avgoddmasked, fscfilemasked2 )
1675 	
1676 	avgevenmasked = avgeven.process('mask.auto3d',{'nshells':3,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': eventhresh })
1677 	avgoddmasked = avgodd.process('mask.auto3d',{'nshells':3,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': oddthresh })
1678 	
 1679 	fscfilemasked3 = fscfile.replace('.txt','_mask_intermediate.txt')
1680 	calcFsc( options, avgevenmasked, avgoddmasked, fscfilemasked3 )
1681 	
1682 	avgevenmasked = avgeven.process('mask.auto3d',{'nshells':4,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': eventhresh })
1683 	avgoddmasked = avgodd.process('mask.auto3d',{'nshells':4,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': oddthresh })
1684 	
1685 	fscfilemasked4 = fscfile.replace('.txt','_mask_loose.txt')
1686 	calcFsc( options, avgevenmasked, avgoddmasked, fscfilemasked4 )
1687 	
1688 	finalthresh = finalA['sigma']
1689 	
1690 	finalAfilt = finalA.copy()
1691 	finalAmasked = finalAfilt.process('mask.auto3d',{'nshells':1,'nshellsgauss':6,'radius':1,'nmaxseed':1,'threshold': finalthresh })
1692 	
1693 	finalAmasked.write_image( options.path + '/recomputed_final_avg_masked.hdf', 0 )
1694 	
1695 	return finalA
1696 
1697 
1698 def calcFsc( options, img1, img2, fscfile ):
1699 	
1700 	img1fsc = img1.copy()
1701 	img2fsc = img2.copy()
1702 	
1703 	apix = img1['apix_x']
1704 	
1705 	#if options.clipali:
1706 		#img1fsc = clip3D( img1fsc, options.clipali )
1707 		#img1fsc.process_inpl
1708 		
1709 	#	img1fsc.write_image(options.path +'/vol4fsc1.hdf',0)
1710 		
1711 	#	img2fsc = clip3D( img2fsc, options.clipali )
1712 	#	img2fsc.write_image(options.path +'/vol4fsc2.hdf',0)
1713 		
1714 	fsc = img1fsc.calc_fourier_shell_correlation( img2fsc )
1715 	third = len( fsc )/3
1716 	xaxis = fsc[0:third]
1717 	fsc = fsc[third:2*third]
1718 	saxis = [x/apix for x in xaxis]
1719 	Util.save_data( saxis[1], saxis[1]-saxis[0], fsc[1:-1], fscfile )
1720 	
1721 	return
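#Minimal usage sketch for calcFsc (filenames hypothetical; both volumes must share box size and apix):
#	e = EMData( 'avg_even.hdf', 0 )
#	o = EMData( 'avg_odd.hdf', 0 )
#	calcFsc( options, e, o, options.path + '/fsc_example.txt' )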
1722 
1723 
1724 def clip3D( vol, sizex, sizey=0, sizez=0 ):
1725 	
1726 	if not sizey:
1727 		sizey=sizex
1728 	
1729 	if not sizez:
1730 		sizez=sizex
1731 	
1732 	volxc = vol['nx']/2
1733 	volyc = vol['ny']/2
1734 	volzc = vol['nz']/2
1735 	
1736 	Rvol =  Region( (2*volxc - sizex)/2, (2*volyc - sizey)/2, (2*volzc - sizez)/2, sizex , sizey , sizez)
1737 	vol.clip_inplace( Rvol )
1738 	#vol.process_inplace('mask.sharp',{'outer_radius':-1})
1739 	
1740 	return vol
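#Illustrative use of clip3D (not executed here; 'vol' stands for any EMData volume):
#	vol = clip3D( vol, 64 )				#clip to a centered 64x64x64 box
#	vol = clip3D( vol, 64, 64, 32 )			#or to an anisotropic 64x64x32 box
#Note that clip_inplace() modifies the volume passed in, so the returned object is the same volume.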
1741 
1742 
1743 '''
1744 Function to generate the reference either by reading from disk or bootstrapping
1745 '''
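#The dictionary returned by sptRefGen maps each class number to its reference volume; for a
#gold-standard run it would look like (illustrative): { 0:<EMData even ref>, 1:<EMData odd ref> }.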
1746 def sptRefGen( options, ptclnumsdict, cmdwp, refinemulti=0, method='',subset4ref=0):
1747 	
1748 	refsdict = {}
1749 	elements = cmdwp.split(' ')
1750 	
1751 	#print "elements are", elements
1752 	#print "ptclnumsdict received in sptRefGen is", ptclnumsdict
1753 	#print "RECEIVED CMDWP", cmdwp
1754 	#print 'Therefore elemnts are', elements
1755 	
1756 	#current = os.getcwd()
1757 	
1758 	for klassnum in ptclnumsdict:
1759 		
1760 		klassidref = '_even'
1761 		
1762 		if klassnum == 1:
1763 			klassidref = '_odd'
1764 		
1765 		try:
1766 			if options.goldstandardoff:
1767 				klassidref = ''
1768 		except:
1769 			if refinemulti:
1770 				klassidref = ''
1771 				
1772 		
1773 		if refinemulti:
1774 			zfillfactor = len(str( len( ptclnumsdict )))
1775 			
1776 			#if ptclnumsdict[klassnum]:
1777 			#else:
1778 			
1779 			klassidref = '_' + str( klassnum ).zfill( zfillfactor )
1780 			if len( ptclnumsdict ) < 2:
1781 				klassidref = '_' + str( refinemulti ).zfill( zfillfactor )	
1782 		
1783 		if options.ref: 
1784 			ref = EMData(options.ref,0)
1785 
1786 			if options.verbose:
 1787 				print "\n(e2spt_classaverage)(sptRefGen) - READ reference and its type and min, max, sigma, mean stats are", options.ref, type(ref), ref['minimum'],ref['maximum'],ref['sigma'],ref['mean']
1788 
1789 			if not ref['maximum'] and not ref['minimum']:
1790 				print "(e2spt_classaverage)(sptRefGen) - ERROR: Empty/blank reference file. Exiting."
1791 				sys.exit()
1792 			
1793 			if options.apix:
1794 				ref['apix_x'] = options.apix
1795 				ref['apix_y'] = options.apix
1796 				ref['apix_z'] = options.apix
1797 			
1798 			if options.refrandphase:
1799 				filterfreq =  1.0/float( options.refrandphase )
1800 				ref.process_inplace("filter.lowpass.randomphase",{"cutoff_freq":filterfreq,"apix":ref['apix_x']})
1801 						
1802 				refrandphfile = options.path + '/' + os.path.basename( options.ref ).replace('.hdf','_randPH' + klassidref +'.hdf')
1803 				
 1804 				if 'final_avg' in refrandphfile:								#you don't want any confusion between final averages produced in other runs of the program and references
1805 					refrandphfile = refrandphfile.replace('final_avg','ref')
1806 
1807 				ref['origin_x'] = 0
1808 				ref['origin_y'] = 0
1809 				ref['origin_z'] = 0
1810 				ref.write_image( refrandphfile, 0 )
1811 
1812 			if float(ref['apix_x']) <= 1.0:
1813 				print "\n(e2spt_classaverage)(sptRefGen) - WARNING: apix <= 1.0. This is most likely wrong. You might want to fix the reference's apix value by providing --apix or by running it through e2fixheaderparam.py"
1814 			
1815 			refsdict.update({ klassnum : ref })
1816 			
1817 		
1818 		else:
1819 			ptclnums = ptclnumsdict[ klassnum ]
1820 			#print "Therefore for class", klassnum
1821 			#print "ptclnums len and themsvels are", len(ptclnums), ptclnums
1822 		
1823 			if ptclnums:
1824 				ptclnums.sort()		
1825 			
1826 			try:
1827 				if options.hacref:
1828 					method = 'hac'
1829 			except:
1830 				pass
1831 			
1832 			try:
1833 				if options.btref:
1834 					method = 'bt'
1835 			except:
1836 				pass
1837 			
1838 			try:
1839 				if options.ssaref:
1840 					method = 'ssa'
1841 			except:
1842 				pass
1843 				
1844 			if not method and not options.ref:
1845 				method = 'bt'
 1846 				print "\n(e2spt_classaverage)(sptRefGen) - no reference or reference-generation method specified; defaulting to binary tree ('bt')"
1847 				
1848 			#elif options.hacref:
1849 			if method == 'hac':
1850 		
1851 				if options.verbose:
1852 					print "\n(e2spt_classaverage)(sptRefGen) - Generating initial reference using hierarchical ascendant classification through e2spt_hac.py"
1853 			
1854 				subsetForHacRef = 'spthac_refsubset'+ klassidref + '.hdf'
1855 			
1856 				i = 0
1857 				nptclsforref = 10
1858 				try:
1859 					if options.hacref:
1860 						nptclsforref = options.hacref								
1861 				except:
1862 					if subset4ref:
1863 						nptclsforref=subset4ref
1864 			
1865 				if nptclsforref >= len(ptclnums):
1866 					nptclsforref =  len(ptclnums)
1867 			
1868 				print "Hacreflimit is", nptclsforref
1869 				if nptclsforref < 3:
 1870 					print """ERROR: You cannot build a HAC reference with fewer than 3 particles.
1871 					Either provide a larger --hacref number, a larger --subset number, or provide
1872 					--goldstandardoff"""
1873 				
1874 					sys.exit()
1875 			
1876 			
1877 				i = 0
1878 				while i < nptclsforref :
1879 					a = EMData( options.input, ptclnums[i] )
1880 					a.write_image( subsetForHacRef, i )
1881 					i+=1
1882 			
1883 				niterhac = nptclsforref - 1
1884 
1885 				hacelements = []
1886 				for ele in elements:
1887 					if 'btref' not in ele and 'hacref' not in ele and 'ssaref' not in ele and 'subset4ref' not in ele and 'refgenmethod' not in ele and 'nref' not in ele and 'output' not in ele and 'fsc' not in ele and 'subset' not in ele and 'input' not in ele and '--ref' not in ele and 'path' not in ele and 'keep' not in ele and 'iter' not in ele and 'subset' not in ele and 'goldstandardoff' not in ele and 'saveallalign' not in ele and 'savepreprocessed' not in ele:
1888 						hacelements.append(ele)
1889 			
1890 				cmdhac = ' '.join(hacelements)
1891 				cmdhac = cmdhac.replace('e2spt_classaverage','e2spt_hac')
1892 			
1893 				if refinemulti:
1894 					cmdhac = cmdhac.replace('e2spt_refinemulti','e2spt_hac')
1895 				
1896 			
1897 				hacrefsubdir = 'spthac_ref' + klassidref
1898 			
1899 				cmdhac+=' --path=' + hacrefsubdir
1900 				cmdhac+=' --iter='+str(niterhac)
1901 				cmdhac+=' --input='+subsetForHacRef
1902 				
1903 				runcmd( options, cmdhac )
1904 				#cmdhac += ' && mv ' + hacrefsubdir + ' ' + options.path + '/' + ' && mv ' + subsetForHacRef + ' ' + options.path
1905 
1906 				#cmdhac2 = 'mv ' + hacrefsubdir + ' ' + options.path + '/'
1907 				#runcmd( options, cmdhac2 )
1908 				os.rename( hacrefsubdir, options.path + '/' + hacrefsubdir)
1909 
1910 				
1911 				#cmdhac3 = 'mv ' + subsetForHacRef + ' ' + options.path
1912 				#runcmd( options, cmdhac3 )
1913 				os.rename( subsetForHacRef, options.path + '/' + subsetForHacRef)
1914 				
1915 				if options.verbose:
1916 					print "\n(e2spt_classaverage)(sptRefGen) - Command to generate hacref is", cmdhac
1917 				
1918 				ref = EMData( options.path + '/' + hacrefsubdir +'/final_avg.hdf', 0 )
1919 
1920 				refsdict.update({ klassnum : ref })
1921 		
1922 			#elif options.ssaref:
1923 			if method == 'ssa':
1924 				if options.verbose:
1925 					print "\n(e2spt_classaverage)(sptRefGen) - Generating initial reference using self symmetry alignment through e2symsearch3d.py"
1926 			
1927 				if options.sym == 'c1' or options.sym == 'C1':
1928 					print """\n(e2spt_classaverage)(sptRefGen) - ERROR: You must provide at least c2 or higher symmetry to use
1929 					--ssaref"""
1930 				#sys.exit(1)
1931 			
1932 				subsetForSsaRef = 'sptssa_refsubset'+ klassidref + '.hdf'
1933 			
1934 				nptclsforref = 10
1935 				try:
1936 					if options.ssaref:
1937 						nptclsforref=options.ssaref	
1938 				except:
1939 					if subset4ref:
1940 						nptclsforref=subset4ref
1941 			
1942 				if nptclsforref >= len(ptclnums):
1943 					nptclsforref =  len(ptclnums)
1944 			
1945 				i = 0
1946 				while i < nptclsforref :
1947 					a = EMData( options.input, ptclnums[i] )
1948 					a.write_image( subsetForSsaRef, i )
1949 					i+=1
1950 			
1951 				ssarefsubdir = 'sptssa_ref' + klassidref
1952 			
1953 				ssaelements = []
1954 				for ele in elements:
1955 					if 'btref' not in ele and 'hacref' not in ele and 'ssaref' not in ele and 'subset4ref' not in ele and 'refgenmethod' not in ele and 'nref' not in ele and 'fine' not in ele and 'fsc' not in ele and 'output' not in ele and 'path' not in ele and 'goldstandardoff' not in ele and 'saveallalign' not in ele and 'savepreprocessed' not in ele and 'align' not in ele and 'iter' not in ele and 'npeakstorefine' not in ele and 'precision'not in ele and '--radius' not in ele and 'randphase' not in ele and 'search' not in ele and '--save' not in ele and '--ref' not in ele and 'input' not in ele and 'output' not in ele and 'subset' not in ele:
1956 					#	print "Appended element", ele
1957 						ssaelements.append(ele)
1958 					#else:
1959 					#	print "SKIPPED element", ele
1960 					
1961 				#if 'e2spt_classaverage' in ssaelements:
1962 				#	print "HERE!"
1963 				#	sys.exit()
1964 				#else:
1965 				#	print "Not here, see", ssaelements
1966 				#	sys.exit()
1967 				
1968 				cmdssa = ' '.join(ssaelements)
1969 				cmdssa = cmdssa.replace('e2spt_classaverage','e2symsearch3d')
1970 			
1971 				if refinemulti:
1972 					cmdssa = cmdssa.replace('e2spt_refinemulti','e2symsearch3d')
1973 			
1974 				cmdssa += ' --input=' + subsetForSsaRef 
1975 				cmdssa += ' --path=' + ssarefsubdir
1976 				cmdssa += ' --symmetrize'
1977 				cmdssa += ' --average'
1978 				
1979 				runcmd( options, cmdssa )
1980 				
1981 				#ssarefname = 'ssaref_' + klassidref + '.hdf'
1982 				ssarefname = 'final_avg.hdf'
1983 				#cmdssa += ' --output=' + ssarefname
1984 			
1985 				#cmdssa += ' && mv ' + ssarefsubdir + ' ' + options.path + '/' + ' && mv ' + subsetForSsaRef + ' ' + options.path
1986 
1987 				#cmdssa2 = 'mv ' + ssarefsubdir + ' ' + options.path + '/'
1988 				#runcmd( options, cmdssa2 )
1989 				os.rename( ssarefsubdir, options.path + '/' + ssarefsubdir)
1990 				
1991 				
1992 				#cmdssa3 =  'mv ' + subsetForSsaRef + ' ' + options.path
1993 				#runcmd( options, cmdssa3 )
1994 				os.rename( subsetForSsaRef, options.path + '/' + subsetForSsaRef)
1995 				
1996 				if options.verbose:
1997 					print "\n(e2spt_classaverage)(sptRefGen) - Command to generate ssaref is", cmdssa
1998 
1999 				#p=subprocess.Popen( cmdssa, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2000 				#text=p.communicate()	
2001 				#p.stdout.close()
2002 			
2003 				ref = EMData( options.path + '/' + ssarefsubdir +'/' + ssarefname, 0 )
2004 
2005 				refsdict.update({ klassnum : ref })
2006 
2007 			#elif not options.hacref and not options.ssaref:				
2008 			if method == 'bt':
2009 		
2010 				nptclsforref = 64
2011 				
2012 				#try:
2013 				#	if options.btref:
2014 				#		nptclsforref = options.btref		
2015 				#except:
2016 				#	if subset4ref:
2017 				#		nptclsforref = subset4ref
2018 			
2019 				#if nptclsforref >= len(ptclnums):
2020 				#	nptclsforref =  len(ptclnums)
2021 				
2022 				
2023 				
2024 				#from e2spt_binarytree import binaryTreeRef
2025 				print "\ninput is", options.input
2026 				print "with nimgs", EMUtil.get_image_count( options.input )
2027 				print "--goldstandardoff is", options.goldstandardoff
2028 				print "len ptclnums is", len(ptclnums)
2029 				
2030 				print "log 2 of that is" 
2031 				print log( len(ptclnums), 2 )
2032 			
2033 				niter = int(floor(log( len(ptclnums), 2 )))
2034 				print "and niter is", niter
2035 				nseed = 2**niter
2036 				print "therefore nseed=2**niter is", nseed
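				#Worked example (hypothetical class size): with len(ptclnums) = 21,
				#niter = floor(log2(21)) = 4 and nseed = 2**4 = 16, so the first 16 particles of the
				#class are written out below and used to build the binary-tree reference.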
2037 				
2038 				
2039 				#try:
2040 				#	if options.btref:
2041 				#		niter = int(floor(log( options.btref, 2 )))
2042 				#		nseed=2**niter			
2043 				#except:
2044 				#	if subset4ref:
2045 				#		niter = int(floor(log( subset4ref, 2 )))
2046 				#		nseed=2**niter	
2047 				
2048 				
2049 				#if not options.goldstandardoff:
2050 				#	nseed /= 2
2051 				
2052 					
2053 				subsetForBTRef = 'sptbt_refsubset'+ klassidref + '.hdf'
2054 			
2055 				i = 0
2056 				
2057 				
2058 				#print "ptclnums are", ptclnums
2059 				#print "with len", len(ptclnums)
2060 				
2061 				while i < nseed :
2062 					print "i is", i
2063 					a = EMData( options.input, ptclnums[i] )
2064 					a.write_image( subsetForBTRef, i )
2065 					print "writing image %d to file %s, which will contain the subset of particles used for BTA reference building" %(i,subsetForBTRef)
2066 					i+=1
2067 
2068 				btelements = []
2069 				#print "elements are", elements
2070 				for ele in elements:
2071 					if 'btref' not in ele and 'hacref' not in ele and 'ssaref' not in ele and 'subset4ref' not in ele and 'refgenmethod' not in ele and 'nref' not in ele and 'output' not in ele and 'fsc' not in ele and 'subset' not in ele and 'input' not in ele and '--ref' not in ele and 'path' not in ele and 'keep' not in ele and 'iter' not in ele and 'goldstandardoff' not in ele and 'saveallalign' not in ele and 'savepreprocessed' not in ele:
2072 						#print "added ele", ele
2073 						btelements.append(ele)
2074 					else:
2075 						pass
2076 						#print "skipped ele", ele
2077 			
2078 				cmdbt = ' '.join(btelements)
2079 				cmdbt = cmdbt.replace('e2spt_classaverage','e2spt_binarytree')
2080 			
2081 				#print "wildcard is!", wildcard
2082 				#print "BEFORE replacement", cmdbt
2083 			
2084 				if refinemulti:
2085 					cmdbt = cmdbt.replace('e2spt_refinemulti','e2spt_binarytree')
2086 			
2087 			
2088 				btrefsubdir = 'sptbt_ref' + klassidref		
2089 			
2090 				cmdbt+=' --path=' + btrefsubdir
2091 				cmdbt+=' --iter=' + str( niter )
2092 				cmdbt+=' --input=' + subsetForBTRef
2093 				
2094 				runcmd( options, cmdbt )
2095 				
2096 				#cmdbt+= ' && mv ' + btrefsubdir + ' ' + options.path + '/' + ' && mv ' + subsetForBTRef + ' ' + options.path
2097 			
2098 				
2099 				if options.verbose:
2100 					print "\n(e2spt_classaverage)(sptRefGen) - Command to generate btref is", cmdbt
2101 
2102 				#p=subprocess.Popen( cmdbt, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2103 				#text=p.communicate()	
2104 				#p.stdout.close()
2105 			
2106 			
2107 				#cmdbt2 = 'mv ' + btrefsubdir + ' ' + options.path + '/' 
2108 				#runcmd( options, cmdbt2 )
2109 				os.rename( btrefsubdir, options.path + '/' + btrefsubdir )
2110 				
2111 				
2112 				#cmdbt3 = 'mv ' + subsetForBTRef + ' ' + options.path
2113 				#runcmd( options, cmdbt3 )
2114 				os.rename( subsetForBTRef, options.path + '/' + subsetForBTRef )
2115 				
2116 				
2117 			
2118 				#if os.getcwd() not in options.path:
2119 				#	options.path = os.getcwd() + '/' + ptions.path
2120 			
2121 				print "\ncmdbt is", cmdbt
2122 			
2123 				#print "\nfindir are"
2124 				#findir=os.listdir(current)
2125 				#for f in findir:
2126 				#	print f
2127 			
2128 				print "The BT reference to load is in file",  options.path+ '/' +btrefsubdir +'/final_avg.hdf'
2129 				ref = EMData( options.path + '/' + btrefsubdir +'/final_avg.hdf', 0 )
2130 
2131 				refsdict.update({ klassnum : ref })
2132 	
2133 	refnames={}
2134 	#if options.savesteps:
2135 	#	if options.goldstandardoff and options.ref:
2136 	#		refname = options.path + '/refinitial.hdf'
2137 	#		ref = refsdict[0]
2138 	#		ref.write_image( refname, 0 )
2139 	#
2140 	#	else:
2141 	#		for klass in refsdict:
2142 	#			refname = options.path + '/refinitial_even.hdf'	
2143 	#			if klass == 1:
2144 	#				refname = options.path + '/refinitial_odd.hdf'
2145 	#			ref = refsdict[ klass ]
2146 	#			ref.write_image(refname, 0)
2147 
2148 	return refsdict
2149 	
2150 
 2151 def runcmd(options,cmd):
	import subprocess	#needed for Popen below; subprocess is not imported at the top of this file
 2152 	if options.verbose > 9:
2153 		print "(e2spt_classaverage)(runcmd) running command", cmd
2154 	
2155 	p=subprocess.Popen( cmd, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2156 	text=p.communicate()	
2157 	p.stdout.close()
2158 	
2159 	if options.verbose > 9:
2160 		print "(e2spt_classaverage)(runcmd) done"
2161 	return
2162 
2163 
2164 def sptOptionsParser( options ):
2165 	try:
2166 		if options.align:
2167 			#print "(e2spt_classaverage) --align to parse is", options.align
2168 			options.align=parsemodopt(options.align)
2169 		elif options.align == 'None' or  options.align == 'none':
2170 			options.align=None
2171 	except:
2172 		if options.verbose > 9:
2173 			print "\nWARNING (might not be relevant): --align not provided"
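	#parsemodopt() turns an EMAN2 "name:param=value:..." string into a (name, {param:value}) tuple;
	#for example (illustrative): 'filter.lowpass.gauss:cutoff_freq=0.02' becomes
	#('filter.lowpass.gauss', {'cutoff_freq':0.02}). The try/except blocks below apply the same
	#parsing to each processor-style option when it is present.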
2174 	
2175 	
2176 	#try:
2177 	#	if options.ialign:
2178 	#		#print "(e2spt_classaverage) --align to parse is", options.align
2179 	#		options.ialign=parsemodopt(options.ialign)
2180 	#	elif options.ialign == 'None' or  options.ialign == 'none':
2181 	#		options.ialign=None
2182 	#except:
2183 	#	if options.verbose > 9:
2184 	#		print "\nWARNING (might not be relevant): --ialign not provided"
2185 	
2186 	
2187 		
2188 	try:
2189 		if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none': 
2190 			options.falign=parsemodopt(options.falign)
2191 		elif options.falign == 'None' or  options.falign == 'none':
2192 			options.falign=None
2193 	except:
2194 		if options.verbose > 9:
2195 			print "\nWARNING (might not be relevant): --falign not provided"
2196 	
2197 	try:
2198 		if options.aligncmp: 
2199 			options.aligncmp=parsemodopt(options.aligncmp)
2200 		elif options.aligncmp == 'None' or  options.aligncmp == 'none':
2201 			options.aligncmp=None
2202 	except:
2203 		if options.verbose > 9:
2204 			print "\nWARNING (might not be relevant): --aligncmp not provided"
2205 	
2206 	try:	
2207 		if options.faligncmp: 
2208 			options.faligncmp=parsemodopt(options.faligncmp)
2209 		elif options.faligncmp == 'None' or  options.faligncmp == 'none':
2210 			options.faligncmp=None
2211 	except:
2212 		if options.verbose > 9:
2213 			print "\nWARNING (might not be relevant): --faligncmp not provided"
2214 		
2215 	try:
2216 		if options.averager: 
2217 			options.averager=parsemodopt(options.averager)
2218 		elif options.averager == 'None' or  options.averager == 'none':
2219 			options.averager=None
2220 	except:
2221 		if options.verbose > 9:
2222 			print "\nWARNING (might not be relevant): --averager not provided"
2223 		
2224 	try:
2225 		if options.autocenter:
2226 			options.autocenter=parsemodopt(options.autocenter)
2227 		elif options.autocenter == 'None' or  options.autocenter == 'none':
2228 			options.autocenter=None
2229 	except:
2230 		if options.verbose > 9:
2231 			print "\nWARNING (might not be relevant): --autocenter not provided"
2232 		
2233 	try:
2234 		if options.autocentermask:
2235 			options.autocentermask=parsemodopt(options.autocentermask)
2236 		elif options.autocentermask == 'None' or  options.autocentermask == 'none':
2237 			options.autocentermask=None
2238 	except:
2239 		if options.verbose > 9:
2240 			print "\nWARNING (might not be relevant): --autocentermask not provided"
2241 	
2242 	try:
2243 		if options.normproc and options.normproc != 'None' and options.normproc != 'none':
2244 			options.normproc=parsemodopt(options.normproc)
2245 		elif options.normproc == 'None' or  options.normproc == 'none':
2246 			options.normproc=None
2247 	except:
2248 		if options.verbose > 9:
2249 			print "\nWARNING (might not be relevant): --normproc not provided"
2250 	
2251 	try:
2252 		if options.mask and options.mask != 'None' and options.mask != 'none':
2253 			#print "parsing mask", sys.exit()
2254 			options.mask=parsemodopt(options.mask)
2255 		elif options.mask == 'None' or  options.mask == 'none':
2256 			options.mask=None
2257 	except:
2258 		if options.verbose > 9:
2259 			print "\nWARNING (might not be relevant): --mask not provided"
2260 	
2261 	try:	
2262 		if options.preprocess and options.preprocess != 'None' and options.preprocess != 'none': 
2263 			options.preprocess=parsemodopt(options.preprocess)
2264 		elif options.preprocess == 'None' or  options.preprocess == 'none':
2265 			options.preprocess=None
2266 	except:
2267 		if options.verbose > 9:
2268 			print "\nWARNING (might not be relevant): --preprocess not provided"
2269 	
2270 	try:	
2271 		if options.threshold and options.threshold != 'None' and options.threshold != 'none': 
2272 			options.threshold=parsemodopt(options.threshold)
2273 		elif options.threshold == 'None' or  options.threshold == 'none':
2274 			options.threshold=None
2275 	except:
2276 		if options.verbose > 9:
2277 			print "\nWARNING (might not be relevant): --threshold not provided"
2278 	
2279 	try:
2280 		if options.preprocessfine and options.preprocessfine != 'None' and options.preprocessfine != 'none': 
2281 			options.preprocessfine=parsemodopt(options.preprocessfine)
2282 		elif options.preprocessfine == 'None' or  options.preprocessfine == 'none':
2283 			options.preprocessfine=None
2284 	except:
2285 		if options.verbose > 9:
2286 			print "\nWARNING (might not be relevant): --preprocessfine not provided"
2287 	
2288 	try:	
2289 		if options.lowpass and options.lowpass != 'None' and options.lowpass != 'none': 
2290 			options.lowpass=parsemodopt(options.lowpass)
2291 		elif options.lowpass == 'None' or  options.lowpass == 'none':
2292 			options.lowpass=None
2293 	except:
2294 		if options.verbose > 9:
2295 			print "\nWARNING (might not be relevant): --lowpass not provided"
2296 	
2297 	try:
2298 		if options.lowpassfine and options.lowpassfine != 'None' and options.lowpassfine != 'none': 
2299 			options.lowpassfine=parsemodopt(options.lowpassfine)
2300 		elif options.lowpassfine == 'None' or  options.lowpassfine == 'none':
2301 			options.lowpassfine=None
2302 	except:
2303 		if options.verbose > 9:
2304 			print "\nWARNING (might not be relevant): --lowpassfine not provided"
2305 	
2306 	try:
2307 		if options.highpass and options.highpass != 'None' and options.highpass != 'none': 
2308 			options.highpass=parsemodopt(options.highpass)
2309 		elif options.highpass == 'None' or  options.highpass == 'none':
2310 			options.highpass=None
2311 	except:
2312 		if options.verbose > 9:
2313 			print "\nWARNING (might not be relevant): --highpass not provided"
2314 	
2315 	try:
2316 		if options.highpassfine and options.highpassfine != 'None' and options.highpassfine != 'none': 
2317 			options.highpassfine=parsemodopt(options.highpassfine)
2318 		elif options.highpassfine == 'None' or  options.highpassfine == 'none':
2319 			options.highpassfine=None
2320 	except:
2321 		if options.verbose > 9:
2322 			print "\nWARNING (might not be relevant): --highpassfine not provided"
2323 	try:
2324 		if options.postprocess and options.postprocess != 'None' and options.postprocess != 'none': 
2325 			options.postprocess=parsemodopt(options.postprocess)
2326 		elif options.postprocess == 'None' or  options.postprocess == 'none':
2327 			options.postprocess=None
2328 	except:
2329 		if options.verbose > 9:
2330 			print "\nWARNING (might not be relevant): --postprocess not provided"
2331 	
2332 	try:
2333 		if options.reconstructor and options.reconstructor != 'None' and options.reconstructor != 'none': 
2334 			options.reconstructor=parsemodopt(options.reconstructor)
2335 		elif options.reconstructor == 'None' or  options.reconstructor == 'none':
2336 			options.reconstructor=None
2337 	except:
2338 		if options.verbose > 9:
2339 			print "\nWARNING (might not be relevant): --reconstructor not provided"
2340 	
2341 	try:
2342 		if options.preavgproc1 and options.preavgproc1 != 'None' and options.preavgproc1 != 'none': 
2343 			options.preavgproc1=parsemodopt(options.preavgproc1)
2344 		elif options.preavgproc1 == 'None' or  options.preavgproc1 == 'none':
2345 			options.preavgproc1=None
2346 	except:
2347 		if options.verbose > 9:
 2348 			print "\nWARNING (might not be relevant): --preavgproc1 not provided"
2349 		
2350 	try:
2351 		if options.preavgproc2 and options.preavgproc2 != 'None' and options.preavgproc2 != 'none': 
2352 			options.preavgproc2=parsemodopt(options.preavgproc2)
2353 		elif options.preavgproc2 == 'None' or  options.preavgproc2 == 'none':
2354 			options.preavgproc2=None
2355 	except:
2356 		if options.verbose > 9:
 2357 			print "\nWARNING (might not be relevant): --preavgproc2 not provided"
2358 	
2359 	return options
2360 
2361 
2362 '''
2363 Function to write the parameters used for every run of the program to parameters.txt inside the path specified by --path.
2364 *** Imported by many e2spt programs ***
2365 '''
2366 def writeParameters( options, program, tag ):
2367 	print "Tag received in writeParameters is", tag
2368 
2369 	names = dir(options)
2370 	
2371 	cmd = program
2372 	lines = []
2373 	now = datetime.datetime.now()
2374 	lines.append(str(now)+'\n')
2375 	
2376 	#print "\nnames are", names
2377 	optionscopy = options
2378 	
2379 	try:
2380 		if options.search == 0 or options.search == 0.0:
2381 			options.search = '0'
2382 	except:
2383 		pass
2384 	try:
2385 		if options.searchfine == 0 or options.searchfine == 0.0:
2386 			options.searchfine = '0'
2387 	except:
2388 		pass
2389 		
2390 	#print "mask in write parameters is", optionscopy.mask, type(optionscopy.mask)
2391 	for name in names:
2392 				
2393 		if getattr(options,name) and "__" not in name and "_" not in name:
2394 		#if "__" not in name and "_" not in name:	
2395 	
2396 			#if "__" not in name and "_" not in name and str(getattr(options,name)) and 'path' not in name and str(getattr(options,name)) != 'False' and str(getattr(options,name)) != 'True' and str(getattr(options,name)) != 'None':			
2397 			line = name + '=' + str(getattr(optionscopy,name))
2398 					
2399 			lines.append(line+'\n')
2400 			
2401 			if str(getattr(optionscopy,name)) != 'True' and str(getattr(optionscopy,name)) != 'False' and str(getattr(optionscopy,name)) != '':
2402 			
2403 				if name != 'parallel':
 2404 					if "{" in str( getattr(optionscopy,name) ) or "}" in str(getattr(optionscopy,name)) or "(" in str(getattr(optionscopy,name)) or ")" in str(getattr(optionscopy,name)): 
2405 						
2406 						tail = str( getattr(optionscopy,name) ).replace(':','=').replace('(','').replace(')','').replace('{','').replace('}','').replace(',',':').replace(' ','').replace("'",'')
2407 						if tail[-1] == ':':
2408 							tail = tail[:-1] 
2409 						cmd += ' --' + name + '=' + tail
2410 					else:
2411 						
2412 						tail = str( getattr(optionscopy,name) )
2413 						if tail[-1] == ':':
2414 							tail = tail[:-1]
2415 						cmd += ' --' + name + '=' + tail
2416 						
2417 				else:
2418 					cmd += ' --' + name + '=' + str(getattr(optionscopy,name))
2419 			
2420 			elif str(getattr(optionscopy,name)) == 'True' or str(getattr(optionscopy,name)) == 'False':
2421 				cmd += ' --' + name
2422 	
2423 	parmFile = 'parameters_' + tag + '.txt'
2424 	lines.append('\n'+cmd+'\n')
2425 	#f=open( optionscopy.path + '/' + parmFile,'w')
2426 	pfile = optionscopy.path + '/' + parmFile
2427 	f = open( pfile, 'w')
2428 	f.writelines(lines)
2429 	f.close()
2430 	
2431 	return cmd
2432 
2433 
2434 def calcAliStep(options):
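	'''Derives sensible coarse and fine angular steps and translational search ranges from --radius, --shrink,
	--shrinkfine and --precision, and builds the corresponding --align and --falign strings.'''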
2435 
2436 	print "\n\n(e2spt_classaverage)(calcAliStep) options.radius is", options.radius
2437 	print "(e2spt_classaverage)(calcAliStep) options.input is", options.input
2438 
2439 	hdr = EMData( options.input,0,True )
2440 	apix = float( hdr['apix_x'] )
2441 	capix = apix
2442 	fapix =apix
2443 	
2444 	#if options.shrinkfine and float(options.shrinkfine) > 1.0:
2445 	#	apix = 2*apix
2446 	
2447 	factor = factorc = factorf = factorRelative = 1.0
2448 	
2449 	#radPix = radPixC = radPixF = options.radius
2450 	
2451 	if options.shrink and float( options.shrink ) > 1.0:
2452 		print "options.shrink is > 1, see:", options.shrink
2453 		factorc = options.shrink
2454 		capix = apix*factorc
2455 	
2456 	if options.shrinkfine  and float( options.shrinkfine ) > 1.0:
2457 		factorf = options.shrinkfine
2458 		print "options.shrinkfine > 1, see:", options.shrinkfine
2459 		fapix = apix*factorf
2460 	
2461 	#if factorc > 1.0 and factorf > 1.0:										#The relative shrinking factor doesn't really matter
2462 	#	factorRelative = factorc / factorf
2463 	
2464 	print "\n(e2spt_classaverage)(calcAliStep) apix is", apix
2465 	
2466 	
2467 	radPixC = options.radius / capix
2468 	radPixF = options.radius / fapix
2469 
2470 	coarseStep1pix =  360.0/(2.0*math.pi*radPixC)
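	#Illustrative example (hypothetical values): for a particle of radius 150 angstroms at apix=5 angstroms/pixel
	#with no shrinking, radPixC = 150/5 = 30 pixels, so the step that moves the outermost voxel by ~1 pixel is
	#360.0/(2.0*math.pi*30) ~= 1.91 degrees; a --precision value > 1 multiplies (coarsens) this step below.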
2471 	#coarseStep1pixRounded = math.floor(coarseStep1pix*100.00)/100.00
2472 	
2473 	if options.precision > 1.0:
2474 		coarseStep1pix *= options.precision
2475 	
2476 	fineStep = 360.0/(2.0*math.pi*radPixF)
2477 	if options.precision > 1.0:
2478 		fineStep *= options.precision
2479 	
2480 	fineStepRounded = math.floor(fineStep*100.00)/100.00					#Round fine step DOWN to scan slightly more finely than theoretically needed
2481 	
2482 	rango = 2.0 * fineStep													#Alignment goes from -range (rango) to +range
2483 	#rango = coarseStep / 2.0									
2484 	rangoRounded = math.ceil(rango*100.00)/100.00							#Round the range in fine alignments UP, to cover a slightly larger area than theoretically needed
2485 	
2486 	angularDistance = 2.0*rango												#This is the angular distance between points generated by the sphere aligner for coarse alignment
2487 	angularDistanceRounded = math.floor(angularDistance*100.00)/100.00		#Round angular distance between coarse points down, to sample more tightly
2488 	
2489 	coarseStep = angularDistanceRounded / 2.25								#The 2.25 factor is approximate. The angular distance A between two rotations R1=a1,b1,c1 and R2=a2,b2,c2 
2490 																			#where r1=a1=b1=c1 and r2=a2=b2=c2, for example R1=0,0,0 and R2=12,12,12, is roughly A=(r2-r1)*2.25 
2491 																			#This can be empirically verified with e2spt_transformdistance.py
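	#Illustrative example (hypothetical values): if fineStep ~= 1.91 degrees, then rango ~= 3.82 degrees, the angular
	#distance between coarse points is ~7.64 degrees, and coarseStep ~= 7.64/2.25 ~= 3.40 degrees, so the fine search
	#of +/- rango around each coarse peak still overlaps the neighboring coarse orientations.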
2492 	
2493 	if coarseStep < coarseStep1pix:
2494 		print """The coarse step %f is finer than one pixel at the edge of the particle, 
2495 		therefore it will be replaced with %f""" % (coarseStep,coarseStep1pix)
2496 		coarseStep = coarseStep1pix
2497 	
2498 	CSrounded = math.floor( coarseStep * 100.00 )/100.00		#Round coarse step DOWN to scan slightly more finely than theoretically needed
2499 
2500 	
2501 	print "\n\n*****************"
2502 	print "\n\nThe radius in pixels at the size used for fine alignment (taking --shrinkfine into account) is", radPixF
2503 	print "Shrink is", options.shrink
2504 	print "Shrink refine is", options.shrinkfine
2505 	print "Therefore, the coarse step and itself rounded are", coarseStep, CSrounded
2506 	print "And the fine step before and after rounding is", fineStep, fineStepRounded
2507 	print "rango and its rounded are", rango, rangoRounded
2508 	print "\n\n*****************\n\n"
2509 	
2510 	searchC = hdr['nx']/2.0 - 2.0
2511 	searchF = 2
2512 	
2513 	if int( options.search ) > 1 :
2514 		searchC = options.search
2515 		print "searchC is", searchC
2516 	else:
2517 		if int(options.search) == 0:
2518 			searchC = 0
2519 			print "searchC is", searchC	
2520 
2521 		elif int( options.search ) == 1:
2522 			searchC = 1
2523 
2524 		#elif options.mask:
2525 		#	if 'mask.sharp' in options.mask[0]:
2526 		#		if 'outer_radius' in options.mask[1]:
2527 		#			om = options.mask[1]['outer_radius']
2528 		#	
2529 		#			if '-' in str(om):
2530 		#				om = hdr['nx']/2 + om
2531 		#			searchC = om/2.0
2532 		#			print "\nBecause the radius for the mask is", om
2533 		#			print "searchC is", searchC
2534 		#			print "\n"
2535 				
2536 		if options.shrink and float(options.shrink) > 1.0:
2537 			searchC = int( searchC / options.shrink )
2538 	
2539 			print "\nBecause shrink >1.0, searchC is actually", searchC
2540 		
2541 	if options.searchfine:
2542 		searchF = options.searchfine
2543 		print "\nSearchF is", searchF
2544 		print "\n"
2545 	else:
2546 		searchF = 0
2547 	
2548 	options.align = 'rotate_translate_3d:search=' + str(searchC) +':delta=' + str(CSrounded) + ':dphi=' + str(CSrounded)
2549 	if options.sym and options.sym != 'c1' and options.sym != 'C1' and 'sym' not in options.align:
2550 		options.align += ':sym=' + options.sym
2551 	
2552 	
2553 	print "inside calcali options.falign received is", options.falign
2554 	if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none':
2555 		#if options.radius:
2556 		options.falign = 'refine_3d_grid:range=' + str(rangoRounded) + ':delta=' + str(fineStepRounded) + ':search=' + str(searchF)
2557 	else:
2558 		options.falign = 'None'
2559 		
2560 	print "and has been set to", options.falign
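	#Illustrative example of the strings built above (hypothetical values): with searchC=28, CSrounded=3.39,
	#rangoRounded=3.82, fineStepRounded=1.9 and searchF=2, the aligners would be roughly
	#rotate_translate_3d:search=28:delta=3.39:dphi=3.39 and refine_3d_grid:range=3.82:delta=1.9:search=2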
2561 	
2562 	
2563 	if options.verbose:
2564 		if options.verbose > 9:
2565 			options.align += ':verbose=1'
2566 			if options.falign and options.falign != 'None': options.falign += ':verbose=1'
2567 	
2568 	return options
2569 	
2570 	
2571 
2572 def postprocess(img,mask,normproc,postprocess):
2573 	'''Postprocesses a volume in-place'''
2574 	
2575 	# Make a mask, use it to normalize (optionally), then apply it 
2576 	maskimg=EMData(img["nx"],img["ny"],img["nz"])
2577 	maskimg.to_one()
2578 	if mask:
2579 		maskimg.process_inplace(mask[0],mask[1])
2580 		
2581 	# normalize
2582 	if normproc:
2583 		if normproc[0]=="normalize.mask": 
2584 			normproc[1]["mask"]=maskimg
2585 		img.process_inplace(normproc[0],normproc[1])
2586 
2587 	img.mult(maskimg)
2588 	
2589 	# Postprocess filter
2590 	if postprocess: 
2591 		img.process_inplace(postprocess[0],postprocess[1])
2592 	return img
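#Illustrative usage sketch (not called anywhere in this file; the processor names and parameters are only examples):
#	vol = EMData('avg.hdf',0)
#	postprocess( vol, ('mask.sharp',{'outer_radius':32}), ('normalize.mask',{}), ('filter.lowpass.gauss',{'cutoff_freq':0.05}) )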
2593 
2594 
2595 def sptmakepath(options, stem='spt'):
2596 	if options.verbose:
2597 		print "\n(e2spt_classaverage)(sptmakepath), stem is", stem
2598 	
2599 	#if options.path and ("/" in options.path or "#" in options.path):
2600 	#	print "Path specifier should be the name of a subdirectory to use in the current directory. Neither '/' or '#' can be included. "
2601 	#	sys.exit(1)
2602 
2603 
2604 	files=os.listdir(os.getcwd())
2605 
2606 	if not options.path:		
2607 		#options.path = stem + '_01'
2608 		options.path = stem
2609 		if options.verbose:
2610 			print """\n(e2spt_classaverage)(sptmakepath)--path was not specified, 
2611 			therefore it will have the default value""", options.path 	
2612 
2613 	while options.path in files:
2614 		if '_' not in options.path:
2615 			options.path = options.path + '_00'
2616 		else:
2617 			jobtag=''
2618 			components=options.path.split('_')
2619 			if components[-1].isdigit():
2620 				components[-1] = str(int(components[-1])+1).zfill(2)
2621 			else:
2622 				components.append('00')
2623 						
2624 			options.path = '_'.join(components)
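			#illustrative example: a default stem of 'spt' becomes 'spt_00' on the first clash, then 'spt_01',
			#'spt_02', etc., until a directory name that does not yet exist in the current directory is found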
2625 			#options.path = path
2626 	
2627 	print "The new options.path is", options.path
2628 
2629 	if options.path not in files:
2630 		if options.verbose:
2631 			print "I will make THIS path", options.path
2632 		
2633 		#os.system('mkdir ' + options.path)
2634 		os.mkdir( options.path )
2635 	
2636 	return options
2637 
2638 
2639 
2640 def preprocessing(image,options,ptclindx=0,tag='ptcls',coarse='yes',round=-1,finetag=''):
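	'''Applies the mask/normalization/threshold/filter/shrink options to a copy of 'image' and returns it.
	When finetag is set, the fine-alignment preprocessing parameters (--lowpassfine, --highpassfine, etc.) are
	used instead of the coarse ones; preprocessed particles can optionally be written out via --savepreprocessed.'''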
2641 
2642 	print "\n(e2spt_classaverage) preprocessing"
2643 	#print "Mask and its type are", mask, type(mask)
2644 	if options.mask == 'None' or options.mask  == 'none':
2645 		options.mask  = None
2646 	if options.lowpass  == 'None' or options.lowpass == 'none':
2647 		options.lowpass = None
2648 	if options.highpass == 'None' or options.highpass == 'none':
2649 		options.highpass = None
2650 	if options.preprocess == 'None' or options.preprocess == 'none':
2651 		options.preprocess = None
2652 	if options.threshold == 'None' or options.threshold == 'none':
2653 		options.threshold = None
2654 	
2655 	
2656 	if finetag:
2657 		try:
2658 			if options.lowpassfine  == 'None' or options.lowpassfine == 'none':
2659 				options.lowpassfine = None
2660 			if options.highpassfine == 'None' or options.highpassfine == 'none':
2661 				options.highpassfine = None
2662 			if options.preprocessfine == 'None' or options.preprocessfine == 'none':
2663 				options.preprocessfine = None
2664 		except:
2665 			print "\nWarning: Ignore if parameters --lowpassfine, --highpassfine, --preprocessfine don't exist in the program you ran. Otherwise, something went wrong."
2666 				
2667 	if coarse != 'yes':
2668 		#print "lowpassfine received is", options.lowpass	
2669 		pass
2670 		
2671 	apix = image['apix_x']
2672 	
2673 	'''
2674 	Make the mask first 
2675 	'''
2676 	print "masking"
2677 	maskimg = EMData( int(image["nx"]), int(image["ny"]), int(image["nz"]) )
2678 	maskimg.to_one()
2679 	print "Done creating mask"
2680 	
2681 	
2682 	simage = image.copy()
2683 
2684 
2685 	
2686 	if options.mask and options.mask != 'None' and options.mask != 'none':
2687 		#if options.verbose:
2688 		print "This is the mask I will apply: mask.process_inplace(%s,%s)" %(options.mask[0],options.mask[1]) 
2689 		maskimg.process_inplace(options.mask[0],options.mask[1])
2690 		
2691 		print "(e2spt_classaverage)(preprocessing) --mask provided:", options.mask
2692 		#mask.write_image(options.path + '/mask.hdf',-1)
2693 	
2694 	try:
2695 		if options.maskfile:
2696 			maskfileimg = EMData(options.maskfile,0)
2697 		
2698 			#mx = maskfileimg['nx']
2699 			#my = maskfileimg['ny']
2700 			#mz = maskfileimg['nz']
2701 		
2702 			#mxc = mx/2
2703 			#myc = my/2
2704 			#mzc = mz/2
2705 		
2706 			#mnewsize = maskimg['nx']
2707 		
2708 			#r=Region( (2*mxc - mnewsize)/2, (2*myc - mnewsize)/2, (2*mzc - mnewsize)/2, mnewsize , mnewsize , mnewsize)
2709 			#maskfileimg.clip_inplace( r )
2710 			
2711 			if options.clipali or maskfileimg['nx'] !=  maskimg['nx'] or maskfileimg['ny'] !=  maskimg['ny'] or maskfileimg['nz'] !=  maskimg['nz']:
2712 				maskfileimg = clip3D( maskfileimg, maskimg['nx'] )
2713 		
2714 		
2715 			maskimg.mult( maskfileimg )
2716 			
2717 			print "A maskfile was multiplied by the mask", options.maskfile
2718 		else:
2719 			print "Apparently there was no --maskfile"
2720 			pass
2721 	except:
2722 		print "\nWarning: Ignore if parameter --maskfile does not exist in the program you ran. Otherwise, something went wrong."
2723 
2724 	
2725 	'''
2726 	Set the 'mask' parameter for --normproc if normalize.mask is being used
2727 	'''
2728 	if options.normproc and options.normproc != 'None' and options.normproc != 'none':
2729 		print "normproc is", options.normproc, type(options.normproc)
2730 		if options.normproc[0]=="normalize.mask": 
2731 			options.normproc[1]["mask"]=maskimg
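			#illustrative note: e.g. --normproc=normalize.mask parses to ('normalize.mask', {}), and the binary mask
			#volume built above is passed in as its 'mask' parameter so that normalization statistics are computed
			#only inside the masked region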
2732 	
2733 	'''
2734 	Normalize-Mask
2735 	'''	
2736 	#if mask and mask != 'None' and mask != 'none' or options.maskfile:
2737 	
2738 	#try:
2739 	#	if options.mask or options.maskfile:
2740 			#if options.shrink:
2741 			#	maskCoarse = mask.copy()
2742 			#	maskCoarse.process_inplace('math.meanshrink',{'n':options.shrink})
2743 	#		print "Masking once"
2744 			#simage.write_image(options.path + '/imgRawClip.hdf',-1)
2745 	#		simage.mult(maskimg)
2746 			#simage.write_image(options.path + '/imgMsk1.hdf',-1)
2747 	#except:
2748 	#	pass
2749 			
2750 	if options.normproc and options.normproc != 'None' and options.normproc != 'none':
2751 		simage.process_inplace(options.normproc[0],options.normproc[1])
2752 		#simage.write_image(options.path + '/imgMsk1norm.hdf',-1)
2753 
2754 		print "(e2spt_classaverage)(preprocessing) --normproc provided:", options.normproc
2755 	
2756 	try:
2757 		#if mask and mask != 'None' and mask != 'none' or options.maskfile:
2758 		if options.mask or options.maskfile:
2759 			print "Masking again after normalizing"
2760 			simage.mult(maskimg)
2761 			#simage.write_image(options.path + '/imgMsk1normMsk2.hdf',-1)
2762 	except:
2763 		pass
2764 		
2765 	#if lowpass or highpass or preprocess or int(shrink) > 1:
2766 	#	simage = filters(options,simage,preprocess,lowpass,highpass,shrink)
2767 	#	#simage.write_image(options.path + '/imgMsk1normMsk2Filts.hdf',-1)
2768 
2769 
2770 	if options.threshold and options.threshold != 'None' and options.threshold != 'none':
2771 		simage.process_inplace(options.threshold[0],options.threshold[1])
2772 		#simage.write_image(options.path + '/imgMsk1normMsk2LpFiltsThr.hdf',-1)
2773 
2774 	'''
2775 	#Preprocess, lowpass and/or highpass
2776 	'''
2777 	
2778 	if not finetag:
2779 		
2780 		if options.lowpass:
2781 			print "(e2spt_classaverage)(preprocessing) --lowpass provided:", options.lowpass
2782 			simage.process_inplace(options.lowpass[0],options.lowpass[1])
2783 			#fimage.write_image(options.path + '/imgPrepLp.hdf',-1)
2784 	
2785 		if options.highpass:
2786 			print "(e2spt_classaverage)(preprocessing) --highpass provided:", options.highpass
2787 			simage.process_inplace(options.highpass[0],options.highpass[1])
2788 			#fimage.write_image(options.path + '/imgPrepLpHp.hdf',-1)
2789 		
2790 		if options.shrink and int( options.shrink  ) > 1:
2791 			print "(e2spt_classaverage)(preprocessing) --shrink provided:", options.shrink
2792 			simage.process_inplace("math.meanshrink",{"n":options.shrink })
2793 		
2794 		if options.preprocess:
2795 			print "(e2spt_classaverage)(preprocessing) --preprocess provided:", options.preprocess
2796 			simage.process_inplace(options.preprocess[0],options.preprocess[1])
2797 			#fimage.write_image(options.path + '/imgPrep.hdf',-1)
2798 		
2799 	else:
2800 		try:
2801 			if options.lowpassfine:
2802 				print "(e2spt_classaverage)(preprocessing) --lowpassfine provided:", options.lowpassfine
2803 				simage.process_inplace(options.lowpassfine[0],options.lowpassfine[1])
2804 				#fimage.write_image(options.path + '/imgPrepLp.hdf',-1)
2805 	
2806 			if options.highpassfine:
2807 				print "(e2spt_classaverage)(preprocessing) --highpassfine provided:", options.highpassfine
2808 				simage.process_inplace(options.highpassfine[0],options.highpassfine[1])
2809 				#fimage.write_image(options.path + '/imgPrepLpHp.hdf',-1)
2810 		
2811 		
2812 			if finetag != 'noshrink':
2813 				if options.shrinkfine and int( options.shrinkfine  ) > 1 :
2814 					print "(e2spt_classaverage)(preprocessing) --shrinkfine provided:", options.shrinkfine
2815 					simage.process_inplace("math.meanshrink",{"n":options.shrinkfine })
2816 				#fimage.write_image(options.path + '/imgPrepLpHpSh.hdf',-1)
2817 			
2818 			if options.preprocessfine:
2819 				print "(e2spt_classaverage)(preprocessing) --preprocessfine provided:", options.preprocessfine
2820 				simage.process_inplace(options.preprocessfine[0],options.preprocessfine[1])
2821 				#fimage.write_image(options.path + '/imgPrep.hdf',-1)
2822 		
2823 		except:
2824 			print "\nWarning: Ignore if parameters --lowpassfine, --highpassfine, --shrinkfine, --preprocessfine do not exist in the program you ran. Otherwise, something went wrong."
2825 	
2826 	
2827 	preproclst = ''		
2828 		
2829 	lines=[]
2830 	preproclst = options.path + "/.preproclstfine.txt"	
2831 	if coarse == 'yes':
2832 		preproclst = options.path + "/.preproclstcoarse.txt"
2833 	
2834 	ptclindxtag = str(ptclindx) + '\n'
2835 	
2836 	
2837 	if tag=='ptcls':
2838 	
2839 		try:
2840 		
2841 			#print "Trying to save preprocessed"
2842 			#print "options.savepreprocessed is", options.savepreprocessed
2843 			#print "preproclst is", preproclst
2844 			#print "ptclindxtag is", ptclindxtag
2845 			#print "lines are", lines
2846 			#print "round is", round
2847 			#print "ptclindx is", ptclindx
2848 		
2849 			#read the indexes of particles already saved BEFORE checking membership, so the same particle is not written twice
2850 			if options.savepreprocessed and preproclst and round < 1 and ptclindx > -1:
2851 				os.system('touch ' + preproclst)
2852 				f=open(preproclst,'r')
2853 				lines=f.readlines()
2854 				f.close()
2855 	
2856 			if options.savepreprocessed and preproclst and ptclindxtag not in lines and round < 1 and ptclindx > -1:
2857 			
2858 				#print "Saving preprocessed!"
2859 	
2860 			
2861 				#indx=-1
2862 		
2863 				#print "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nParticle indx is", ptclindx
2864 		
2865 				#sys.exit() 
2866 		
2867 				rootname=options.input.split('/')[-1]
2868 		
2869 				#if tag == 'ref':
2870 				#	rootname = options.ref
2871 				#	indx=0
2872 			
2873 				preprocname = rootname.replace('.hdf','_preprocCoarse.hdf')
2874 		
2875 				print "coarse is", coarse
2876 				#sys.exit()
2877 		
2878 				if coarse != 'yes':
2879 					preprocname = rootname.replace('.hdf','_preprocFine.hdf')
2880 					print "Saving preprocessed particle for fine alignment"
2881 					#sys.exit()
2882 	
2883 				if options.path not in preprocname:
2884 					preprocname = options.path + '/' + preprocname
2885 		
2886 				simage.write_image(preprocname,ptclindx)
2887 		
2888 				f=open(preproclst,'a')
2889 				f.write(ptclindxtag)
2890 				f.close()
2891 			
2892 		
2893 				#print "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n******************\n*******************\n"
2894 				#print "Wrote this ptclindxtag", ptclindxtag
2895 				#print "To this ptclst", preproclst
2896 				#print "\n******************\n*******************\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
2897 				#sys.exit()
2898 	
2899 			#sys.exit()
2900 		except:
2901 			if options.savepreprocessed and round == 0:
2902 				print "\nWARNING: preprocessed particles not saved even though --savepreprocessed is on and this is the first iteration."
2903 		
2904 			#print "DID not try to save preproc, although options.savepreprocessed is"
2905 			#print "options.savepreprocessed is", options.savepreprocessed
2906 			#print "preproclst is", preproclst
2907 			#print "ptclindxtag is", ptclindxtag
2908 			#print "lines are", lines
2909 			#print "round is", round
2910 			#print "ptclindx is", ptclindx
2911 		
2912 			#sys.exit()
2913 			#pass
2914 	
2915 	if options.verbose:
2916 		print "Done preprocessing ptcl", ptclindx	
2917 	
2918 	return simage
2919 	
2920 
2921 def makeAverage(options,ic,results,it=0):
2922 	
2923 	klassid = '_even'
2924 	if ic == 1:
2925 		klassid = '_odd'
2926 	if options.goldstandardoff:
2927 		klassid = ''
2928 		
2929 	ptcl_file = options.input
2930 	path = options.path
2931 	#align_params=results
2932 	averager = options.averager
2933 	saveali = options.saveali
2934 	saveallalign = options.saveallalign
2935 	keep = options.keep
2936 	keepsig = options.keepsig
2937 	symmetry = options.sym
2938 	
2939 	breaksym = options.breaksym
2940 	#options.nocenterofmass
2941 	verbose = options.verbose
2942 
2943 	'''Will take a set of alignments and an input particle stack filename and produce a 
2944 	new class-average. Particles may be excluded based on the keep and keepsig parameters. 
2945 	If keepsig is not set, then keep represents an absolute fraction of particles to keep (0-1). 
2946 	Otherwise it represents a sigma multiplier akin to e2classaverage.py.'''
2947 	
2948 	
2949 	print "len of results inside makeAverage is", len(results)
2950 	
2951 	print "(e2spt_classaverage.py)(makeAverage) parsing results"
2952 	
2953 	#for r in results:
2954 	#	print r
2955 	
2956 	#else:
2957 	
2958 	thresh=1.0
2959 	scores = []
2960 	
2961 	#print "results are", results
2962 	#print "\nresults[0] is", results[0]
2963 	#print "\nresults[0][0] is", results[0][0]
2964 	##print "\nresults[0][0[0] is", results[0][0][0]
2965 	#print "\nresults[0][0][0]['xform.align3d'] is", results[0][0][0]["xform.align3d"]
2966 	#print "\nresults[0][-1]", results[0][-1]
2967 	
2968 	
2969 	for indx in results:
2970 		#if 'score' in r[0]:
2971 		#	pass
2972 		#	#print "\nscore!"
2973 		#else:
2974 		#	if r and r[0]:
2975 		#		print "\nIn e2spt_classaverage.py, score not in a non-empty element of p, p[0]", r, r[0]
2976 		#		#print "see, p[0] is", p[0]
2977 		#		sys.exit()
2978 	
2979 		#if r and r[0]:
2980 		#print "\nr is", r
2981 		#print "\nr[0]", r[0]
2982 		#print "\nr[0][0]",r[0][0]
2983 		#print "score in r[0][0]", 'score' in r[0][0]
2984 		#score = r[0][0]['score']
2985 		
2986 		score = results[indx][0]
2987 		scores.append( score )
2988 			#vals.append( score )
2989 				
2990 	if keep < 1.0:
2991 		#print "p[0]['score'] is", align_parms[0]['score']
2992 		print "Len of align_parms is", len(results)
2993 			
2994 			#val=[p[0]["score"] for p in align_parms]
2995 		scores.sort()
2996 		thresh = float( scores[ int( keep * len(scores) ) - 1] )
2997 		if verbose: 
2998 			print "Keep threshold : %f (min=%f  max=%f)"%(thresh,scores[0],scores[-1])
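		#illustrative example (hypothetical scores): with keep=0.8 and 10 particles, the ascending (best-first) score
		#list is cut at index int(0.8*10)-1 = 7, so the 8 best-scoring (most negative) particles fall at or below thresh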
2999 	
3000 	if keepsig:
3001 		# inefficient memory-wise
3002 		
3003 		scores2=[]
3004 		for score in scores:
3005 
3006 			scores2.append(score**2)
3007 			
3008 		val=sum(scores)
3009 		val2=sum(scores2)
3010 
3011 		mean=val/len(scores)
3012 		sig=math.sqrt(val2/len(scores)-mean*mean)
3013 		thresh = float( mean + sig * keep )
3014 		if verbose: 
3015 			print "Keep threshold : %f (mean=%f  sigma=%f)"%(thresh,mean,sig)
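		#the threshold above in equation form (a sketch of the math, no extra computation):
		#	mean   = sum(s_i)/N
		#	sigma  = sqrt( sum(s_i^2)/N - mean^2 )
		#	thresh = mean + keep*sigma    (here 'keep' acts as a sigma multiplier rather than as a fraction)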
3016 
3017 	"""Make variance image if available"""
3018 	variance = EMData(ptcl_file,0).copy_head()
3019 	if averager[0] == 'mean':
3020 		print "Making variance map"
3021 		averager[1]['sigma'] = variance
3022 	
3023 	print "averager is", averager
3024 	avgr = Averagers.get(averager[0], averager[1])
3025 	included=[]
3026 	
3027 	#print "The path to save the alignments is", path
3028 	#jsdict = path + '/sptali_ir.json'
3029 	#js = js_open_dict(jsdict)
3030 	
3031 	#maxscore = None
3032 	#try:
3033 	maxscore = min( scores )	#in EMAN2, lower (more negative) scores are better, so the 'best' score is the minimum
3034 	
3035 	print "\n(e2spt_classaverage)(makeAverage) maxscore is", maxscore
3036 	
3037 	#except:
3038 	#	print "There are no scores!", scores
3039 		
3040 	writeali = 0
3041 	aliptcls = path + '/aliptcls' + klassid + '.hdf'
3042 	
3043 	weights={}
3044 	
3045 	try:
3046 		if options.saveallalign:
3047 			writeali = 1
3048 			aliptcls = path + '/aliptcls' + klassid + '_' + str(it).zfill( len(str(options.iter)) ) + '.hdf'
3049 
3050 		elif saveali and it == options.iter - 1:
3051 			writeali = 1
3052 	except: #The exception should be triggered when e2spt_hac.py is called since it doesn't have the --iter parameter.
3053 		if options.saveali:
3054 			writeali = 1		#This saves the aligned particles across ALL iterations for HAC -probably shouldn't be done.
3055 		
3056 		
3057 	#for i,ptcl_parms in enumerate(align_parms):
3058 	
3059 	ii=0
3060 	for indx in results:
3061 		#ptclindx = r[1]
3062 		ptclindx = indx
3063 		print "ptclindx is", ptclindx
3064 		ptcl = EMData(ptcl_file,ptclindx)
3065 		weight = 1.0
3066 		
3067 		#if r and r[0]:
3068 		
3069 		#ptcl.process_inplace("xform",{"transform":r[0][0]["xform.align3d"]})
3070 		ptcl.process_inplace("xform",{"transform":results[indx][1]})
3071 		#print "I have applied this transform before averaging", ptcl_parms[0]["xform.align3d"]			
3072 	
3073 		#score = r[0][0]["score"]
3074 		score = results[indx][0]
3075 		print "\n!!!score is", score
3076 		print "and thresh is", thresh
3077 		
3078 		if score <= thresh:
3079 			#if thresh != 1.0:
3080 		
3081 			print "particle kept because its score %f is LOWER (which means BETTER, in EMAN2 good scores are more negative) than the threshold %f, when the best score was %f" %( score, thresh, maxscore )
3082 			#else:
3083 			#	print "Particle kept because its score %f is LOWER than the DEFAULT threshold %f, when the best score was %f" %( score, thresh, maxscore )
3084 
3085 							
3086 			#print "preavgproc1 and len and type are", options.preavgproc1, len(options.preavgproc1), type(options.preavgproc1)
3087 			#print "preavgproc2 and len are", options.preavgproc2, len(options.preavgproc2),  type(options.preavgproc2)
3088 			
3089 			try:
3090 				if options.preavgproc1:
3091 					ptcl.process_inplace( options.preavgproc1[0], options.preavgproc1[1] )
3092 			except:
3093 				print """ERROR: Preavgproc1 probably requires parameters you did not specify.
3094 					For example, --preavgproc1=threshold.clampminmax.nsigma would fail.
3095 					You need to specify an integer for nsigma, e.g.:
3096 					--preavgproc1=threshold.clampminmax.nsigma:nsigma=2."""	
3097 				sys.exit()		 
3098 			try:
3099 				if options.preavgproc2:
3100 					ptcl.process_inplace( options.preavgproc2[0], options.preavgproc2[1] )
3101 			except:
3102 				print """ERROR: Preavgproc2 probably requires parameters you did not specify.
3103 					For example, --preavgproc2=threshold.clampminmax.nsigma would fail.
3104 					You need to specify an integer for nsigma, e.g.:
3105 					--preavgproc2=threshold.clampminmax.nsigma:nsigma=2."""
3106 				sys.exit()	
3107 					
3108 			if options.weighbytiltaxis:
3109 				print "\n--weighbytiltaxis is", options.weighbytiltaxis
3110 				px = x = int(ptcl['ptcl_source_coord'][0])
3111 				
3112 				tiltaxis = int( options.weighbytiltaxis.split(',')[0] )
3113 				minweight = float( options.weighbytiltaxis.split(',')[1] )
3114 				
3115 				if px > tiltaxis:
3116 					px = -1 *( px - 2*tiltaxis )	#This puts the particle at the same distance from the tilt axis, but to the left of it.
3117 				
3118 				X = tiltaxis				#This models a line in 'weight space' (x, w), that passes through (0, minweight) and ( tiltaxis, maxweight ) 
3119 				W = 1.0 - minweight
3120 				slope = W/X
3121 										#Having the slope of the line and its w-axis intercept, we can predict the weight of any particle from its distance to the tilt axis
3122 				print "tiltaxis is", X
3123 				print "W is", W
3124 				print "Therefore slope is", slope
3125 				
3126 				dx = tiltaxis - px 
3127 				taweight = slope * px + minweight 
3128 				weight = weight * ( taweight )
3129 				print "tiltaxis weight was %f because its distance from the tilt axis is %d and its x coordinate was %d" % (taweight, dx, x)
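				#illustrative example (hypothetical values): with --weighbytiltaxis=256,0.5 a particle at x=384 is
				#folded to px = -(384 - 2*256) = 128, slope = (1.0-0.5)/256, and taweight = slope*128 + 0.5 = 0.75;
				#particles closer to the tilt axis (px approaching 256) get weights closer to 1.0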
3130 
3131 			if options.weighbyscore:
3132 				scoreweight = score / maxscore
3133 				print "the score weight is %f because score was %f and the best score was %f" % (scoreweight, score, maxscore )
3134 				weight = weight * scoreweight
3135 			
3136 			weights.update( {ptclindx:weight} )
3137 				
3138 			print "therefore the final weight for particle %d is %f" %(ptclindx, weight )
3139 				
3140 			ptcl.mult( weight )
3141 			avgr.add_image( ptcl )
3142 			included.append( ptclindx )
3143 			print "\nptcl %d added (included in average) because its score %.6f is below (better) the threshold %f" %(ptclindx,score,thresh) 
3144 			
3145 		
3146 		else:
3147 			print "\nptcl %d skipped (not included in average) because its score %.6f is above (worse) the threshold %f" %(ptclindx,score,thresh) 
3148 			weights.update( {ptclindx:0.0} )
3149 		
3150 		#js["subtomo_%04d"%i] = ptcl_parms[0]['xform.align3d']
3151 	
3152 		if writeali:
3153 			ptcl['origin_x'] = 0
3154 			ptcl['origin_y'] = 0		
3155 			ptcl['origin_z'] = 0
3156 			ptcl['spt_score'] = score
3157 		
3158 			ptcl['xform.align3d'] = Transform()
3159 			#ptcl['xform.align3d'] = r[0][0]["xform.align3d"]
3160 			ptcl['xform.align3d'] = results[indx][1]
3161 			
3162 			#originalindex = i*2
3163 			#if ic == 1:
3164 			#	originalindex = i*2 + 1 
3165 			#if options.goldstandardoff:
3166 			#	originalindex = i
3167 				
3168 			ptcl['spt_indx_original']=ptclindx
3169 		
3170 			ptcl.write_image(aliptcls,ii)
3171 		
3172 		ii+=1
3173 			
3174 					
3175 	#js.close()
3176 	
3177 	if verbose: 
3178 		print "Kept %d / %d particles in average"%(len(included),len(results))
3179 
3180 	print "Will finalize average"
3181 
3182 	avg=avgr.finish()
3183 	print "done"
3184 	
3185 	
3186 	if avg:	
3187 		if symmetry and not breaksym:
3188 			avg = avg.process('xform.applysym',{'sym':symmetry})
3189 	
3190 		avg["class_ptcl_idxs"]=included
3191 		avg["class_ptcl_src"]=ptcl_file
3192 		avg['spt_multiplicity']=len(included)
3193 		avg['spt_ptcl_indxs']=included
3194 	
3195 		if averager[0] == 'mean':
3196 			varmapname = path + '/class' + klassid + '_varmap.hdf'
3197 			variance.write_image( varmapname , it)
3198 				
3199 		#if not nocenterofmass:
3200 		#	avg.process_inplace("xform.centerofmass")
3201 	
3202 	
3203 		if options.autocenter:
3204 			print "\n\n\n\nYou have selected to autocenter!\n", options.autocenter
3205 		
3206 			avgac = avg.copy()
3207 			if options.autocentermask:
3208 				avgac.process_inplace( options.autocentermask[0],options.autocentermask[1] )
3209 			
3210 			if options.autocenterpreprocess:
3211 				apix = avgac['apix_x']
3212 				halfnyquistres = apix*4.0							#resolution (in angstroms) at half the Nyquist frequency
3213 				highpassres = apix*avgac['nx']/2.0					#very low-resolution cutoff for the highpass filter
3214 			
3215 				avgac.process_inplace( 'filter.highpass.gauss',{'cutoff_freq':1.0/highpassres,'apix':apix})
3216 				avgac.process_inplace( 'filter.lowpass.gauss',{'cutoff_freq':1.0/halfnyquistres,'apix':apix})
3217 				avgac.process_inplace( 'math.meanshrink',{'n':2})
3218 			
3219 			avgac.process_inplace(options.autocenter[0],options.autocenter[1])
3220 		
3221 			tcenter = avgac['xform.align3d']
3222 			print "Thus the average has been translated like this", tcenter
3223 			avg.transform(tcenter)
3224 
3225 		avg['origin_x']=0
3226 		avg['origin_y']=0
3227 		avg['origin_z']=0
3228 	
3229 		return [avg,weights]
3230 	else:
3231 		print "\nERROR: failed to compute average for class %d in iteration %d (the average is empty)" %(ic,it)
3232 		sys.exit()
3233 		return [avg,weights]
3234 
3235 
3236 def get_results(etc,tids,verbose,nptcls,refmethod=''):
3237 	'''This will get results for a list of submitted tasks. Won't return until it has all requested results.
3238 	Aside from the use of classoptions['ptclnum'], this is fairly generalizable code.'''
3239 	
3240 	# wait for them to finish and get the results
3241 	# results for each will just be a list of (qual,Transform) pairs
3242 	
3243 	#results=[ [ '' ] ]*nptcls
3244 	results=[ 0 ]*nptcls
3245 	
3246 	#print "tids inside get_results are", tids
3247 	
3248 	if refmethod == 'binarytree':
3249 		results=[0]*len(tids)		# storage for results
3250 	
3251 	ncomplete=0
3252 	tidsleft = tids[:]
3253 	#print "before loop, tidsleft are", tidsleft
3254 	
3255 	while 1:
3256 		time.sleep(5)
3257 		proglist = etc.check_task(tidsleft)
3258 		nwait = 0
3259 		for i,prog in enumerate(proglist):
3260 			if prog == -1: 
3261 				nwait += 1
3262 			
3263 			if prog == 100:
3264 				r = etc.get_results( tidsleft[i] ) 			# results for a completed task
3265 				
3266 				#print "r is", r
3267 				ptcl = r[0].classoptions['ptclnum']			# get the particle number from the task rather than trying to work back to it
3268 				#print "ptcl is", ptcl
3269 				#print "results inside get_results are", results
3270 				
3271 				
3272 				if r[1]['final']:
3273 					results[ptcl] = [ filter(None,r[1]['final']) , ptcl, filter(None,r[1]['coarse']) ]					# this will be a list of (qual,Transform)
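					#illustrative shape of one entry (values are hypothetical): results[ptcl] ends up as
					#[ [ {"score":-0.12, "xform.align3d":Transform(...)}, ... ], ptcl, [ ...coarse peaks... ] ]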
3274 					
3275 				#print "ptcl and type are", ptcl, type(ptcl)
3276 				#print "results[ptcl] are", results[ptcl]
3277 				#print "because results are", results
3278 				
3279 				'''
3280 				if savealiparams and results and results[ptcl]:
3281 					xformslabel = 'subtomo_' + str( ptcl ).zfill( len( str(nptcls) ) )
3282 			
3283 					AliParams=results[ptcl][0]['xform.align3d']
3284 					score = float(results[ptcl][0]['score'])
3285 					jsA.setval( xformslabel, [ AliParams , score ] )
3286 				'''
3287 				
3288 				ncomplete+=1
3289 		
3290 		tidsleft=[j for i,j in enumerate(tidsleft) if proglist[i]!=100]		# remove any completed tasks from the list we ask about
3291 		if verbose:
3292 			# removed nwait here. It was giving an incorrect perception, since the tasks are apparently not updating progress as they should
3293 			print "  %d tasks, %d complete        \r"%(len(tids),ncomplete)
3294 			sys.stdout.flush()
3295 	
3296 		if len(tidsleft)==0: break
3297 		
3298 	return filter(None,results)
3299 
3300 
3301 def wedgestats(volume,angle, wedgei, wedgef, options):
3302 	#print "RECEIEVED, in wedge statistics, angle, wedgei and wedgef", angle, wedgei, wedgef
3303 	vfft = volume.do_fft()
3304 	print "Size of vfft is", vfft['nx'],vfft['ny'],vfft['nz']
3305 	wedge = vfft.getwedge(angle, wedgei, wedgef)
3306 	
3307 	mean = vfft.get_attr('spt_wedge_mean')
3308 	sigma = vfft.get_attr('spt_wedge_sigma')
3309 
3310 	wedge.process_inplace('xform.phaseorigin.tocenter')
3311 	symwedge = wedge.process('xform.mirror', {'axis':'x'})
3312 	
3313 	#print "Size of symwedge is", symwedge['nx'],symwedge['ny'],symwedge['nz']
3314 	finalwedge = wedge + symwedge
3315 	
3316 	#finalwedge.process_inplace('threshold.binary',{'value':0.0})
3317 
3318 	#print "Size of finalwedge is", finalwedge['nx'],finalwedge['ny'],finalwedge['nz']
3319 	#finalwedge_otherhalf = finalwedge.copy()
3320 	#finalwedge_otherhalf.rotate(0,180,0)
3321 	
3322 	'''
3323 	Compute fft amps of the vol and center
3324 	'''
3325 	print "Size of vfft BEFORE real is", vfft['nx'],vfft['ny'],vfft['nz']
3326 	vfft.ri2ap()
3327 	ampsOrig = vfft.amplitude()
3328 	amps = ampsOrig.process('xform.phaseorigin.tocenter')
3329 	symamps = amps.process('xform.mirror', {'axis':'x'})
3330 	finalamps = amps + symamps
3331 		
3332 	#print "Size of amps is", amps['nx'],amps['ny'],amps['nz']
3333 	
3334 	sigmas = options.aligncmp[1]['sigmas']
3335 	print "Sigmas is", sigmas
3336 	
3337 	thresh = math.pow( mean + sigmas * sigma, 2.0 )
3338 	print "Therefore thresh = (mean + sigmas*sigma)^2 is", thresh
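	#illustrative example (hypothetical values): with mean=0.1, sigma=0.02 and sigmas=3 taken from --aligncmp,
	#thresh = (0.1 + 3*0.02)^2 = 0.16^2 = 0.0256, and Fourier amplitudes below this value are zeroed out below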
3339 	
3340 	#print "Size of symamps is", symamps['nx'],symamps['ny'],symamps['nz']
3341 	
3342 	ampsThresh = finalamps.process('threshold.belowtozero',{'minval':thresh})
3343 	
3344 	#ampsOrigThresh.ap2ri()
3345 	#ampsOrigThresh.do_iff()
3346 
3347 	#print "Size of finalamps is", finalamps['nx'],finalamps['ny'],finalamps['nz']
3348 	
3349 	print "\nType of wedge is", type(finalwedge)
3350 	print "\nType of amps is", type(finalamps)
3351 	print "\nType of ampsThresh is", type(ampsThresh)
3352 	
3353 	if options.writewedge:
3354 		
3355 		completePath = os.getcwd() + '/' + options.path
3356 		print "\nThe list of files in path is", os.listdir( completePath )
3357 		 
3358 		wedgename = os.getcwd() + '/' + options.path + '/wedge.hdf'
3359 		finalwedge.write_image(wedgename,0)
3360 		
3361 		ampsname = os.getcwd() + '/' + options.path +'/fftamps.hdf'
3362 		finalamps.write_image(ampsname,-1)
3363 		
3364 		ampsThreshname = os.getcwd() + '/' + options.path + '/fftampsThresh.hdf'
3365 		ampsThresh.write_image(ampsThreshname,-1)	
3366 	
3367 	return(mean,sigma,thresh)
3368 
3369 '''
3370 CLASS TO PARALLELIZE ALIGNMENTS
3371 '''
3372 class Align3DTask(JSTask):
3373 	"""This is a task object for the parallelism system. It is responsible for aligning one 3-D volume to another, with a variety of options"""
3374 
3375 	#def __init__(self,fixedimage,image,ptcl,label,mask,normproc,preprocess,lowpass,highpass,npeakstorefine,align,aligncmp,falign,faligncmp,shrink,shrinkfine,transform,verbose,randomizewedge,wedgeangle,wedgei,wedgef):
3376 	def __init__(self,fixedimage,image,ptclnum,label,options,transform,currentIter,nptclsexception=0):
3377 	
3378 		"""fixedimage and image may be actual EMData objects, or ["cache",path,number]
3379 		label is a descriptive string, not actually used in processing
3380 		ptclnum is not used in executing the task, but is kept for reference
3381 		other parameters match command-line options from e2spt_classaverage.py
3382 		Rather than being a string specifying an aligner, 'align' may be passed in as a 
3383 		Transform object, representing a starting orientation for refinement"""
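		#Illustrative construction (hypothetical file names and indexes): images may be passed as live EMData objects
		#or as cache descriptors, e.g. Align3DTask( ["cache","ref.hdf",0], ["cache","stack.hdf",7], 7, "ptcl 7 in stack.hdf", options, None, 0 )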
3384 		
3385 		
3386 		#data={}
3387 		
3388 		data={"fixedimage":fixedimage,"image":image}
3389 		
3390 		JSTask.__init__(self,"ClassAv3d",data,{},"")
3391 
3392 		#self.classoptions={"options":options,"ptcl":ptcl,"label":label,"mask":options.mask,"normproc":options.normproc,"preprocess":options.preprocess,"lowpass":options.lowpass,"highpass":options.highpass,"npeakstorefine":options.npeakstorefine,"align":options.align,"aligncmp":options.aligncmp,"falign":options.falign,"faligncmp":options.faligncmp,"shrink":options.shrink,"shrinkfine":options.shrinkfine,"transform":transform,"verbose":options.verbose,"randomizewedge":options.randomizewedge,"wedgeangle":options.wedgeangle,"wedgei":options.wedgei,"wedgef":options.wedgef}
3393 		self.classoptions={"options":options,"ptclnum":ptclnum,"label":label,"transform":transform,"currentIter":currentIter,'nptclsexception':nptclsexception}
3394 	
3395 	def execute(self,callback=None):
3396 		"""This aligns one volume to a reference and returns the alignment parameters"""
3397 		classoptions=self.classoptions
3398 
3399 		#print "classptclnum received", classoptions['ptclnum']
3400 		
3401 		if isinstance(self.data["fixedimage"],EMData):
3402 			fixedimage=self.data["fixedimage"]
3403 		else: 
3404 			fixedimage=EMData(self.data["fixedimage"][1],self.data["fixedimage"][2])
3405 		
3406 		if isinstance(self.data["image"],EMData) :
3407 			image=self.data["image"]
3408 		else: 
3409 			#print "\nImage to align is", self.data["image"][1], self.data["image"][2]
3410 			#print "Inside path", classoptions['options'].path
3411 			image=EMData(self.data["image"][1],self.data["image"][2])
3412 		
3413 		
3414 		#print "With image sizes img and fixedimg", image['nx'],fixedimage['nx']
3415 		
3416 		"""
3417 		CALL the alignment function
3418 		"""
3419 		nptcls = EMUtil.get_image_count(classoptions['options'].input)
3420 		
3421 		if classoptions['nptclsexception']:
3422 			nptcls = classoptions['nptclsexception']
3423 			nptcls = 1
3424 			classoptions['ptclnum'] = 0
3425 			
3426 			#print "classptclnum CHANGED TO", classoptions['ptclnum']
3427 			
3428 		#print "\n(e2spt_classaverage)(Align3DTaks)(execute) nptcls is", nptcls
3429 		#print "classoptions are", classoptions
3430 		
3431 		xformslabel = 'subtomo_' + str(classoptions['ptclnum']).zfill( len( str(nptcls) ) )
3432 		
3433 		refpreprocess=0
3434 		options=classoptions['options']
3435 		
3436 		#Need to use try/except because other alignment programs do not have the --ref option or --refpreprocess
3437 		
3438 		try:	
3439 			if options.refpreprocess:
3440 				refpreprocess=1
3441 			else:
3442 				refpreprocess=0
3443 		except:
3444 			refpreprocess=1
3445 		
3446 		try:
3447 			#print "inside class, options.ref and type and len are", options.ref, type(options.ref), len(str(options.ref))
3448 	
3449 			if not options.ref or options.ref == '':
3450 				refpreprocess=1
3451 				#print "\n\n\n\n(e2spt_classaverage)(Align3DTask) There is no reference; therfore, refpreprocess should be turned on", refpreprocess
3452 				
3453 		except:
3454 			#print "inside class options.ref doesnt exist therefore refpreprocess is 1"
3455 			refpreprocess=1
3456 		
3457 		#After the first iteration, refpreprocess is always on. It should be turned on manually by the user if a non-crystal structure reference is provided.
3458 		currentIter=self.classoptions['currentIter']
3459 		
3460 		try:
3461 			if int(options.iter) > 1 and currentIter > 0:
3462 				refpreprocess=1
3463 		except:
3464 			refpreprocess=1
3465 			
3466 		if options.verbose:
3467 			print "\n\n!!!!!!!!!!!!!!!!!!!!!!!!\n(e2spt_classaverage)(Align3DTask) Aligning ",classoptions['label']
3468 			#print "\n\!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!n\n\n\n\n\n\n"
3469 		
3470 		
3471 		#print "SENDING reference image in alignment and values are", fixedimage, fixedimage['minimum'],fixedimage['maximum'],fixedimage['sigma'],fixedimage['mean']
3472 		
3473 		if not fixedimage['maximum'] and not fixedimage['minimum']:
3474 			print "ERROR: the reference is empty."
3475 			sys.exit()
3476 		
3477 		if nptcls == 1:
3478 			classoptions['ptclnum'] = 0
3479 		
3480 		
3481 		ret = alignment(fixedimage,image,classoptions['label'],classoptions['options'],xformslabel,classoptions['currentIter'],classoptions['transform'],'e2spt_classaverage',refpreprocess)
3482 		
3483 		bestfinal=ret[0]
3484 		bestcoarse=ret[1]
3485 
3486 		if nptcls == 1:
3487 			#print "\n\n\nCCCCCCCCCCCCCCC\nRet from alignment is", ret
3488 			#print "therefore received from alignment in class"
3489 			#print "best final", bestfinal
3490 			#print "and best coarse", bestcoarse
3491 			
3492 			pass
3493 			
3494 			#sys.exit()
3495 		
3496 		return {"final":bestfinal,"coarse":bestcoarse}
3497 
3498 '''
3499 FUNCTION FOR RUNNING ALIGNMENTS WITHOUT PARALLELISM
3500 '''
3501 def align3Dfunc(fixedimage,image,ptclnum,label,options,transform,currentIter):
3502 	"""This aligns one volume to a reference and returns the alignment parameters"""
3503 
3504 	if options.verbose: 
3505 		#print "(e2spt_classaverage)(align3Dfunc) Aligning ",label
3506 		pass
3507 		
3508 	#print "In align3Dfunc fixed image and its type are" , fixedimage, type(fixedimage)
3509 	if type(fixedimage) is list:
3510 		fixedimage=EMData(fixedimage[1],fixedimage[2])
3511 	
3512 	if type(image) is list:
3513 		image=EMData(image[1],image[2])
3514 	
3515 	if not options.notmatchimgs:
3516 		fixedimage.process_inplace( 'filter.matchto',{'to':image})
3517 
3518 	"""
3519 	CALL the alignment function
3520 	"""
3521 	
3522 	nptcls = EMUtil.get_image_count(options.input)
3523 	xformslabel = 'subtomo_' + str(ptclnum).zfill( len( str(nptcls) ) )
3524 	
3525 	refpreprocess=0
3526 	
3527 	
3528 	#print "Inside func, options.ref and type and len are", options.ref, type(options.ref), len(str(options.ref))
3529 	if not options.ref or options.ref == '':
3530 		refpreprocess=1
3531 		#print "\n(e2spt_classaverage)(align3Dfunc) There is no reference; therfore, refpreprocess should be turned on", refpreprocess
3532 
3533 	if options.refpreprocess:
3534 		refpreprocess=1
3535 	
3536 	try:
3537 		if int(options.iter) > 1 and currentIter > 0:
3538 			refpreprocess=1
3539 	except:
3540 		refpreprocess=1
3541 	
3542 	#print "SENDING reference image in alignment and values are", fixedimage, fixedimage['minimum'],fixedimage['maximum'],fixedimage['sigma'],fixedimage['mean']
3543 	
3544 	ret=alignment(fixedimage,image,label,options,xformslabel,currentIter,transform,'e2spt_classaverage',refpreprocess)
3545 
3546 	bestfinal=ret[0]
3547 	bestcoarse=ret[1]
3548 	
3549 	#print "\n\n\nReceived from alignment in 3dfunc, bestfinal", bestfinal
3550 	
3551 	if not fixedimage['maximum'] and not fixedimage['minimum']:
3552 		print "Error. Empty reference."
3553 		sys.exit()
3554 	
3555 	
3556 	return {"final":bestfinal,"coarse":bestcoarse}
3557 
3558 
3559 '''
3560 FUNCTION THAT DOES THE ACTUAL ALIGNMENT OF TWO GIVEN SUBVOLUMES -This is also used by e2spt_hac.py, 
3561 e2spt_binarytree.py and e2spt_refinemulti.py, any modification to it or its used parameters 
3562 should be made with caution
3563 '''
3564 def alignment( fixedimage, image, label, options, xformslabel, iter, transform, prog='e2spt_classaverage', refpreprocess=0 ):
3565 	
3566 	gc.collect() 	#free up unused memory
3567 	
3568 	from operator import itemgetter	
3569 	
3570 	
3571 	if not options.notmatchimgs:
3572 		#print "Matching images!"
3573 		#print "Thhere is matchto because notmatch is False, see", classoptions['options'].notmatchimgs 
3574 			
3575 		fixedimage.process_inplace( 'filter.matchto',{'to':image})
3576 	
3577 	
3578 	if options.verbose: 
3579 		print "\n\n!!!!\n(e2spt_classaverage)(alignment) Aligning ", label
3580 		#print "\n\!!!!!n\n\n\n\n\n\n"
3581 	
3582 	
3583 	round=iter
3584 	#print "label is", label
3585 	try:
3586 		ptclindx = int(label.split(' ')[1])
3587 		refindx = -1
3588 	except:
3589 		try:
3590 			ptclindx = int( image['spt_ptcl_indxs'] )							#Deal with --savepreprocessed for e2spt_hac.py
3591 			refindx = int( fixedimage['spt_ptcl_indxs'] )
3592 			#round = int(xformslabel.split('_')[0].replace('round',''))
3593 		except:
3594 			try:
3595 				ptclindx = int( label.split('#')[-1].split(' ')[0] )			#Deal with --savepreprocessed for binary tree initial model building (do NOT savepreprocessed)
3596 				refindx = ptclindx + 1
3597 				round = -1
3598 				
3599 				ptclindx = refindx = -1
3600 				
3601 			except:
3602 				ptclindx = refindx = 0
3603 	
3604 	
3605 	#print "Using this ptcl indx in alignment!", ptclindx
3606 	"""
3607 	PREPROCESSING CALL 
3608 	Currently applied to both volumes. Often 'fixedimage' will be a reference, so may need to rethink whether it should be treated identically. 
3609 	Similar issues in 2-D single particle refinement ... handled differently at the moment
3610 	"""
3611 	
3612 	
3613 	#########################################
3614 	#Preprocess the reference or "fixed image", only if rotate_translate_3d_tree is not used
3615 	#########################################
3616 	reffullsize = fixedimage.copy()
3617 	sfixedimage = fixedimage.copy()
3618 	s2fixedimage = fixedimage.copy()
3619 	
3620 	print "s2fixedimage starts with size", s2fixedimage['nx']
3621 	#sys.exit()
3622 	
3623 	if options.clipali:
3624 		if sfixedimage['nx'] != options.clipali or sfixedimage['ny'] != options.clipali or sfixedimage['nz'] != options.clipali:
3625 			
3626 			sfixedimage = clip3D( sfixedimage, options.clipali )
3627 			print "clipping reference for coarse alignment", options.clipali, sfixedimage['nx']
3628 			#print "\nclipped sfixedimage to", options.clipali, sfixedimage['nx']
3629 		
3630 		if s2fixedimage['nx'] != options.clipali or s2fixedimage['ny'] != options.clipali or s2fixedimage['nz'] != options.clipali:
3631 			s2fixedimage = clip3D( s2fixedimage, options.clipali )
3632 			print "clipping reference for fine alignment", options.clipali, s2fixedimage['nx']
3633 			
3634 			#print "\nclipped s2fixedimage to", options.clipali, s2fixedimage['nx']
3635 		
3636 		if reffullsize['nx'] != options.clipali or reffullsize['ny'] != options.clipali or reffullsize['nz'] != options.clipali:
3637 			reffullsize = clip3D( reffullsize, options.clipali )
3638 			print "clipping full-sized reference for final tweaking alignment", options.clipali, reffullsize['nx']
3639 		
3640 		
3641 	print "before refpreprocess, refpreprocess, iter", refpreprocess, iter
3642 	
3643 	
3644 	if 'rotate_translate_3d_tree' not in options.align[0]:	
3645 	
3646 		if not refpreprocess:
3647 			print "\nthere is NO refpreprocess, but an external reference WAS provided:", options.ref
3648 	
3649 			if options.shrink and int(options.shrink) > 1:
3650 				print "shrinking sfixedimage BEFORE, iter", sfixedimage['nx'], iter
3651 				sfixedimage = sfixedimage.process('math.meanshrink',{'n':options.shrink})
3652 				print "shrinking sfixedimage AFTER, iter", sfixedimage['nx'], iter
3653 		
3654 	
3655 			if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none':
3656 				print "there's fine alignment"
3657 				if options.procfinelikecoarse:
3658 					print "there's procfinelikecoarse"
3659 					s2fixedimage = sfixedimage.copy()
3660 		
3661 				elif options.shrinkfine and int(options.shrinkfine) > 1:
3662 					s2fixedimage = s2fixedimage.process('math.meanshrink',{'n':options.shrinkfine})
3663 					print "shrinking reference for fine alignment!!!!, iter", options.shrinkfine, s2fixedimage['nx'], iter
3664 
3665 			else:
3666 				#s2fixedimage = sfixedimage.copy()
3667 				#s2fixedimage = fixedimage.copy()
3668 				s2fixedimage = None
3669 
3670 
3671 		elif refpreprocess:
3672 			print "there is refpreprocess, iter", iter
3673 			savetag = 'ref'
3674 			if 'odd' in label or 'even' in label:
3675 				savetag = ''
3676 		
3677 			if options.threshold or options.normproc or options.mask or options.preprocess or options.lowpass or options.highpass or int(options.shrink) > 1:
3678 				#print "\nThere IS refpreprocess!"	
3679 		
3680 				print "BEFORE preprocessing coarse ref, because there is refpreprocess, size is %d, iter %d" %( sfixedimage['nx'],iter )
3681 				sfixedimage = preprocessing(sfixedimage,options, refindx, savetag ,'yes',round)
3682 				print "AFTER preprocessing coarse ref, because there is refpreprocess, size is %d, iter %d" %( sfixedimage['nx'],iter )
3683 
3684 			#Only preprocess again if there's fine alignment, AND IF the parameters for fine alignment are different
3685 		
3686 			if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none':
3687 				#if options.procfinelikecoarse:
3688 				#	s2fixedimage = sfixedimage.copy()
3689 				#	print "REFERENCE fine preprocessing is equal to coarse"
3690 		
3691 				print "procfinelikecoarse is", options.procfinelikecoarse
3692 		
3693 				if options.procfinelikecoarse:
3694 					s2fixedimage = sfixedimage.copy()
3695 		
3696 				elif options.preprocessfine or options.lowpassfine or options.highpassfine or int(options.shrinkfine) > 1:
3697 					print "BEFORE preprocessing fine ref, because there is refpreprocess, size is %d, iter %d" %( s2fixedimage['nx'],iter)
3698 
3699 					s2fixedimage = preprocessing(s2fixedimage,options,refindx, savetag ,'no',round,'fine')
3700 	
3701 					print "AFTER preprocessing fine ref, because there is refpreprocess, size is %d, iter %d" %( s2fixedimage['nx'],iter)
3702 
3703 			else:
3704 				#s2fixedimage = sfixedimage.copy()
3705 				#s2fixedimage = fixedimage.copy()	
3706 				s2fixedimage = None
3707 
3708 		if sfixedimage:
3709 			if options.verbose:
3710 				print "after all preproc, COARSE ref is of size %d, in iter %d" %( sfixedimage['nx'], iter)
3711 
3712 		if s2fixedimage:
3713 			if options.verbose:
3714 				print "after all preproc, FINE ref is of size %d, in iter %d" %( s2fixedimage['nx'], iter)
3715 	
3716 		if reffullsize:
3717 			if options.verbose:
3718 				print "after all preproc, REFFULLSIZE is of size %d, in iter %d" %( reffullsize['nx'], iter)
3719 
3720 	#########################################
3721 	#Preprocess the particle or "moving image", only if rotate_translate_3d_tree is not used
3722 	#########################################
3723 	imgfullsize = image.copy()
3724 	simage = image.copy()
3725 	s2image = image.copy()
3726 	
3727 	if options.clipali:
3728 		if simage['nx'] != options.clipali or simage['ny'] != options.clipali or simage['nz'] != options.clipali:
3729 			simage = clip3D( simage, options.clipali )
3730 			
3731 			#print "\nclipped simage to", options.clipali, simage['nx']
3732 
3733 		
3734 		if s2image['nx'] != options.clipali or s2image['ny'] != options.clipali or s2image['nz'] != options.clipali:
3735 			s2image = clip3D( s2image, options.clipali )
3736 		
3737 		if imgfullsize['nx'] != options.clipali or imgfullsize['ny'] != options.clipali or imgfullsize['nz'] != options.clipali:
3738 			imgfullsize = clip3D( imgfullsize, options.clipali )
3739 			
3740 			#print "\nclipped s2image to", options.clipali, s2image['nx']
3741 
3742 	
3743 	
3744 	savetagp = 'ptcls'
3745 	if 'odd' in label or 'even' in label:
3746 		savetagp = ''
3747 	
3748 	
3749 	
3750 	if 'rotate_translate_3d_tree' not in options.align[0]:
3751 	
3752 		if options.threshold or options.normproc or options.mask or options.preprocess or options.lowpass or options.highpass or int(options.shrink) > 1:
3753 
3754 			#print "\n\n\n\n\n\n\n\n\n\n\nSending moving particle to preprocessing. It's size is", simage['nx'],simage['ny'],simage['nz']
3755 
3756 			simage = preprocessing(simage,options,ptclindx, savetagp ,'yes',round)
3757 
3758 		#print "preprocessed moving particle has size", simage['nx']
3759 
3760 		#Only preprocess again if there's fine alignment, AND IF the parameters for fine alignment are different
3761 
3762 		#print "options.falign is", options.falign
3763 
3764 		if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none': 
3765 			if options.procfinelikecoarse:
3766 				s2image = simage.copy()
3767 				#print "PARTICLE fine preprocessing is equal to coarse"
3768 			elif options.preprocessfine or options.lowpassfine or options.highpassfine or int(options.shrinkfine) > 1:
3769 				s2image = preprocessing(s2image,options,ptclindx, savetagp ,'no',round,'fine')
3770 				#print "There was fine preprocessing"
3771 			#sys.exit()
3772 		else:
3773 			#s2image = simage.copy()
3774 			#s2image = image.copy()
3775 			s2image = None
3776 
3777 		if simage:
3778 			if options.verbose:
3779 				print "after all preproc, COARSE ptcl is of size %d, in iter %d" %( simage['nx'], iter)
3780 
3781 		if s2image:
3782 			if options.verbose:
3783 				print "after all preproc, FINE ptcl is of size %d, in iter %d" %( s2image['nx'], iter)	
3784 	
3785 		if imgfullsize:
3786 			if options.verbose:
3787 				print "after all preproc, IMGFULLSIZE is of size %d, in iter %d" %( imgfullsize['nx'], iter)
3788 
3789 	
3790 	
3791 	if sfixedimage['nx'] != simage['nx']:
3792 		print "ERROR: preprocessed images for coarse alignment not the same size, sfixedimage, simage", sfixedimage['nx'], simage['nx']
3793 		#print "ERROR: preprocessed images for coarse alignment not the same size"
3794 		sys.exit()
3795 	
3796 	if 'rotate_translate_3d_tree' not in options.align[0]:
3797 		if options.falign and s2fixedimage and s2image:
3798 			if s2fixedimage['nx'] != s2image['nx']:
3799 				print "ERROR: preprocessed images for fine alignment not the same size, s2fixedimage, s2image", s2fixedimage['nx'], s2image['nx']
3800 				#print "ERROR: preprocessed images for fine alignment not the same size"
3801 				sys.exit()
3802 	
3803 	if transform:
3804 		#print "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nThere WAS a transform, see", transform
3805 		#print "And its type is", type(transform)
3806 		#if classoptions.verbose:
3807 		#	print "Moving Xfrom", transform
3808 		#options["align"][1]["inixform"] = options["transform"]
3809 		if options.align:
3810 			#print "There was classoptions.align"
3811 			#print "and classoptions.align[1] is", classoptions.align[1]
3812 			if options.align[1]:
3813 				options.align[1]["transform"] = transform
3814 	
3815 		if int(options.shrink) > 1:
3816 			#options["align"][1]["inixform"].set_trans( options["align"][1]["inixform"].get_trans()/float(options["shrinkfine"]) )
3817 			options.align[1]["transform"].set_trans( options.align[1]["transform"].get_trans()/float(options.shrink) )
3818 	
3819 	elif options.randomizewedge:
3820 		rand_orient = OrientGens.get("rand",{"n":1,"phitoo":1})		#Fetches the orientation generator
3821 		c1_sym = Symmetries.get("c1")								#Generates the asymmetric unit from which you wish to generate a random orientation
3822 		random_transform = rand_orient.gen_orientations(c1_sym)[0]	#Generates a random orientation (in a Transform object) using the generator and asymmetric unit specified 
3823 		if options.align and options.align[1]:
3824 			options.align[1].update({'transform' : random_transform})
3825 		else:
3826 			transform = random_transform
3827 	
3828 	bestcoarse=[{"score":1.0e10,"xform.align3d":Transform()}]
3829 	if not options.align and transform:
3830 		#if not transform:
3831 		#	bestcoarse=[{"score":1.0e10,"xform.align3d":Transform()}]
3832 		#else:
3833 		ccf = sfixedimage.calc_ccf( simage )
3834 		locmax = ccf.calc_max_location()
3835 							
3836 		locmaxX = locmax[0]
3837 		locmaxY = locmax[1]
3838 		locmaxZ = locmax[2]
3839 		
3840 		score = ccf.get_value_at( locmaxX, locmaxY, locmaxZ )
3841 		
3842 		bestcoarse=[{"score":score,"xform.align3d":transform}]	
3843 
3844 	elif options.align:
3845 		'''
3846 		Returns an ordered vector of Dicts of length options.npeakstorefine. 
3847 		The Dicts in the vector have keys "score" and "xform.align3d" 
3848 		'''
3849 		
3850 		#print "Will do coarse alignment"
3851 		
3852 		#print "\n\n\nRight before COARSE alignment, the boxsize of image is", simage['nx'],simage['ny'],simage['nz']
3853 		#print "Right before COARSE alignment, the boxsize of FIXEDimage is", sfixedimage['nx'],sfixedimage['ny'],sfixedimage['nz']
3854 		#print "Side adjustments will be attempted\n\n\n\n"
3855 		
3856 	
3857 		#print "\n\n\nRight before COARSE ali, AFTER size adj, the boxsize of image is", simage['nx'],simage['ny'],simage['nz']
3858 		#print "Right before COARSE alignment,  AFTER size adj, the boxsize of FIXEDimage is", sfixedimage['nx'],sfixedimage['ny'],sfixedimage['nz']
3859 		
3860 		#if options.align:
3861 		
3862 		if simage['nx'] != sfixedimage['nx'] or simage['ny'] != sfixedimage['ny'] or simage['nz'] != sfixedimage['nz']:
3863 			print "\n\nERROR: COARSE alignment images not the same size"
3864 			#print "\nThe particle's COARSE size is", simage['nx'],simage['ny'],simage['nz']
3865 			#print "\nThe reference's COARSE size is", sfixedimage['nx'],sfixedimage['ny'],sfixedimage['nz']
3866 			sys.exit()	
3867 		
3868 		#some aligners don't have the ability to return 'nbest' answers
3869 	#	try:
3870 	
3871 		#print "\n\noptions.align is", options.align, type(options.align)
3872 		#print "\noptions.align[0]", options.align[0]
3873 		#print "\nsimage and type", simage, type(simage)
3874 		#print "\nsfixedimage and type", sfixedimage, type(sfixedimage)
3875 		
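		#run the coarse aligner; xform_align_nbest returns the options.npeakstorefine best candidates, each a dict with 'score' and 'xform.align3d'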
3876 		bestcoarse = simage.xform_align_nbest(options.align[0],sfixedimage,options.align[1],options.npeakstorefine,options.aligncmp[0],options.aligncmp[1])
3877 		#except:
3878 		#	bestcoarse = simage.align(options.align[0],sfixedimage,options.align[1],options.npeakstorefine,options.aligncmp[0],options.aligncmp[1])
3879 		
3880 		
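		#translation rescaling below only applies to the non-tree aligners; for rotate_translate_3d_tree the result is used exactly as returned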
3881 		if 'rotate_translate_3d_tree' not in options.align[0]:	
3882 			# Scale translation
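			#coarse alignment ran on volumes shrunk by --shrink; rescale its translations to the grid the next step uses:
			#the fine-alignment grid (shrink/shrinkfine) if fine alignment follows, or the full-size grid (shrink) otherwise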
3883 			scaletrans=1.0
3884 			if options.falign and options.falign != None and options.falign != 'None' and options.falign != 'none' and options.shrinkfine:
3885 				scaletrans = options.shrink/float(options.shrinkfine)
3886 			elif options.shrink and not options.falign:
3887 				scaletrans=float(options.shrink)
3888 			
3889 			if scaletrans>1.0:
3890 				#print "\n\n\nShrink or shrinkfine are greater than 1 and not equal, and therefore translations need to be scaled!"
3891 				#print "Before, translations are", bestcoarse[0]['xform.align3d'].get_trans()
3892 				#print "Transform is", bestcoarse[0]['xform.align3d']
3893 			
3894 				for c in bestcoarse:
3895 					c["xform.align3d"].set_trans(c["xform.align3d"].get_trans()*scaletrans)
3896 			
3897 				#print "After scaling, translations are", c['xform.align3d'].get_trans()
3898 				#print "Transform is", c['xform.align3d']
3899 
3900 			elif options.shrink > 1.0 and options.shrinkfine > 1.0 and options.shrink == options.shrinkfine:
3901 				pass
3902 				#print "\n\nshrink and shrink refine were equal!\n\n"
3903 		else:
3904 			print "\ntree alignment returned this best score and alignment", bestcoarse
3905 		
3906 			
3907 	# verbose printout
3908 	if options.verbose > 1 :
3909 		for i,j in enumerate(bestcoarse): 
3910 			pass
3911 			#print "coarse %d. %1.5g\t%s"%(i,j["score"],str(j["xform.align3d"]))
3912 
3913 	besttweak = list( bestcoarse )	#use independent (shallow) copies so that appending tweak results later does not also modify bestcoarse
	bestfinal = list( bestcoarse )
3914 	
3915 	if options.align and 'rotate_translate_3d_tree' not in options.align[0]:
3916 		if options.falign and options.falign[0] and options.falign != 'None' and options.falign != 'none' and options.falign[0] != "None" and options.falign[0] != 'none':
3917 			#print "\n(e2spt_classaverage)(alignment) Will do fine alignment, over these many peaks", len(bestcoarse)
3918 			# Now loop over the individual peaks and refine each
3919 			bestfinal=[]
3920 			#besttweak = []
3921 			peaknum=0
3922 			#print "\n(e2spt_classaverage)(alignment) options.falign is", options.falign, type(options.falign)
3923 			for bc in bestcoarse:
3924 			
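				#seed the fine aligner with this coarse candidate's transform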
3925 				options.falign[1]["xform.align3d"] = bc["xform.align3d"]
3926 			
3927 				#print "\n(e2spt_classaverage)(alignment) s2image['nx'] == s2fixedimage['nx']", s2image['nx'] == s2fixedimage['nx'],  s2image['nx'], type(s2image['nx']), s2fixedimage['nx'], type(s2fixedimage['nx'])
3928 				#print "\n(e2spt_classaverage)(alignment) s2image['ny'] == s2fixedimage['ny']", s2image['ny'] == s2fixedimage['ny'],  s2image['ny'], type(s2image['ny']), s2fixedimage['ny'], type(s2fixedimage['ny'])
3929 				#print "\n(e2spt_classaverage)(alignment) s2image['nz'] == s2fixedimage['nz']", s2image['nz'] == s2fixedimage['nz'],  s2image['nz'], type(s2image['nz']), s2fixedimage['nz'], type(s2fixedimage['nz'])
3930 			
3931 				if int(s2image['nx']) != int(s2fixedimage['nx']) or int(s2image['ny']) != int(s2fixedimage['ny']) or int(s2image['nz']) != int(s2fixedimage['nz']):
3932 					print "\n(e2spt_classaverage)(alignment) ERROR: FINE alignment images not the same size"
3933 					#print "\nThe particle's FINE size is", s2image['nx'],s2image['ny'],s2image['nz']
3934 					#print "\nThe reference's FINE size is", s2fixedimage['nx'],s2fixedimage['ny'],s2fixedimage['nz']
3935 					sys.exit(1)
3936 			
3937 				ali = s2image.align(options.falign[0],s2fixedimage,options.falign[1],options.faligncmp[0],options.faligncmp[1])
3938 				
3939 				#if not options.tweak:
3940 				
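				#keep the fine result if the aligner produced a score; otherwise fall back to the coarse transform with a sentinel (worst possible) score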
3941 				try: 					
3942 					bestfinal.append({"score":ali["score"],"xform.align3d":ali["xform.align3d"],"coarse":bc})
3943 					#print "\nThe appended score in TRY is", bestfinal[0]['score']					
3944 				except:
3945 					bestfinal.append({"score":1.0e10,"xform.align3d":bc["xform.align3d"],"coarse":bc})
3946 				#print "\nThe appended score in EXCEPT is", bestfinal[0]['score']
3947 				peaknum+=1
3948 			
3949 			if options.verbose:
3950 				pass
3951 				#print "Best final is", bestfinal
3952 															
3953 			
3954 			#print "\n\n\nAfter fine alignment, before SHRINK compensation, the transform is", bestfinal[0]['xform.align3d']		
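			#fine alignment ran on volumes shrunk by --shrinkfine; scale its translations back up to the full-size grid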
3955 			if options.shrinkfine>1 :
3956 				
3957 				#C: you need to sort here to be able to tweak productively
3958 				#C: If you just sort 'bestfinal' it will be sorted based on the 'coarse' key in the dictionaries of the list
3959 				#C: because they come before the 'score' key of the dictionary (alphabetically)
3960 				
3961 				
3962 				for c in bestfinal:
3963 					
3964 					#print "fixing these translations", c
3965 					newtrans = c["xform.align3d"].get_trans() * float(options.shrinkfine)
3966 					#print "New trans and type are", newtrans, type(newtrans)
3967 					c["xform.align3d"].set_trans(newtrans)
3968 					
3969 				if options.tweak:
3970 					print "tweak is on"
3971 					bestfinal = sorted( bestfinal, key=lambda d: d['score'] )	#sort by score; 'itemgetter' is only imported further down in this function, so a lambda avoids a NameError here
3972 				
3973 					originalLpFine = deepcopy( options.lowpassfine )	#deep copy: the cutoff is modified in place below, and restoring a plain reference would not undo that change
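					#relax the fine low-pass filter (double its cutoff frequency) for the full-size tweak below; the original settings are restored once tweaking finishes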
3974 					if options.lowpassfine:
3975 						originalLpRes = options.lowpassfine[1]['cutoff_freq']
3976 						newres = originalLpRes*2
3977 						options.lowpassfine[1]['cutoff_freq'] = newres
3978 					
3979 					
3980 					
3981 					if reffullsize['nx'] != imgfullsize['nx'] or  reffullsize['ny'] != imgfullsize['ny'] or  reffullsize['nz'] != imgfullsize['nz']:
3982 						print "ERROR: reffullsize and imgfullsize are not the same size" 
3983 						print "reffullsize", reffullsize['nx'], reffullsize['ny'], reffullsize['nz']
3984 						print "imgfullsize", imgfullsize['nx'], imgfullsize['ny'], imgfullsize['nz']
3985 						sys.exit()
3986 			
3987 					print "\ntweaking alignment!"	
3988 					bestT = bestfinal[0]["xform.align3d"]
3989 					bestScore = bestfinal[0]['score']
3990 				
3991 					print "best alignment was", bestT
3992 					tweakrange = options.falign[1]['delta']+0.5
3993 					print "tweaking range is", tweakrange
3994 					tweakdelta = options.falign[1]['delta']/2.0 - 0.1
3995 					print "tweaking step is", tweakdelta
3996 
3997 					tweaksearch = options.shrinkfine
3998 			
3999 					if options.lowpassfine:
4000 						print "new options.lowpassfine is", options.lowpassfine
4001 			
4002 					reffullsizeali = reffullsize.copy()
4003 					imgfullsizeali = imgfullsize.copy()
4004 			
4005 					reffullsizeali = preprocessing(reffullsizeali,options,ptclindx, 'ref' ,'no',round,'noshrink')
4006 					imgfullsizeali = preprocessing(imgfullsizeali,options,ptclindx, savetagp ,'no',round,'noshrink')
4007 						
4008 						
4009 					print "before alitweak, sizes of img are and apix", imgfullsizeali['nx'], imgfullsizeali['ny'], imgfullsizeali['nz'],imgfullsizeali['apix_x']
4010 					print "before alitweak, sizes of ref are and apix", reffullsizeali['nx'], reffullsizeali['ny'], reffullsizeali['nz'],reffullsizeali['apix_x']
4011 					
4012 					#print "aligner to tweak is", ['refine_3d_grid',{'xform.align3d':bestT,'range':tweakrange,'delta':tweakdelta,'search':tweaksearch}]
4013 					
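					#the tweak itself: a local 'refine_3d_grid' search around the current best transform, run on the full-size, freshly preprocessed volumes
					#(note that 'search' is hardcoded to 2 here; the tweaksearch value computed above is only used in the commented-out alternative)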
4014 					alitweak = imgfullsizeali.align('refine_3d_grid',reffullsizeali,{'xform.align3d':bestT,'range':tweakrange,'delta':tweakdelta,'search':2},options.faligncmp[0],options.faligncmp[1])
4015 					
4016 					#alitweak =imgfullsizeali.align('refine_3d_grid',reffullsizeali,{'xform.align3d':bestT,'range':tweakrange,'delta':tweakdelta,'search':tweaksearch},options.faligncmp[0],options.faligncmp[1])
4017 					
4018 					#reffullsizeali.transform( bestT )
4019 					
4020 					#alitweak =imgfullsizeali.align('rotate_translate_3d_grid',reffullsizeali,{'phi0':0, 'phi1':tweakrange,'dphi':tweakdelta,'az0':0,'az1':360,'daz':tweakdelta,'alt0':0,'alt1':tweakrange,'dalt':tweakdelta,'search':tweaksearch},options.faligncmp[0],options.faligncmp[1])
4021 					#alitweak =imgfullsizeali.align('rotate_translate_3d',reffullsizeali,{'range':tweakrange,'delta':tweakdelta,'search':tweaksearch},options.faligncmp[0],options.faligncmp[1])
4022 					
4023 					
4024 					try: 					
4025 						besttweak.append({"score":ali["score"],"xform.align3d":ali["xform.align3d"],"coarse":bc})
4026 						#print "\nThe appended score in TRY is", bestfinal[0]['score']					
4027 					except:
4028 						besttweak.append({"score":1.0e10,"xform.align3d":bc["xform.align3d"],"coarse":bc})
4029 						#print "\nThe appended score in EXCEPT is", bestfinal[0]['score']
4030 					
4031 					
4032 					#if options.sym and options.sym is not 'c1' and options.sym is not 'C1' and 'sym' not in options.align:
4033 					#	options.align += ':sym=' + options.sym
4034 				
4035 					besttweakT = bestT
4036 					besttweakScore = 1.0e10
4037 					try: 
4038 						besttweakScore = alitweak["score"]
4039 						besttweakT = alitweak["xform.align3d"]					
4040 					
4041 						if float( besttweakScore ) < float( bestScore ) and besttweakT != bestT:
4042 					
4043 							print "tweaking improved score from %.6f to %.6f" %( float( bestScore ), float( besttweakScore ) )
4044 							bestfinal[0]['score'] = besttweakScore
4045 							bestfinal[0]["xform.align3d"] = besttweakT
4046 							print "and changed the transform from, to", bestT, besttweakT
4047 						else:
4048 							print "tweaking did not improve score; therefore, it will be ignored."			
4049 
4050 					except:
4051 						print "WARNING: tweaking failed!"
4052 
4053 					options.lowpassfine = originalLpFine
4054 				
4055 				else:
4056 					print "NOT tweaking!"								
4057 						
4058 					
4059 				
4060 			#print "AFTER fine alignment, after SHRINK compensation, the transform is", bestfinal[0]['xform.align3d']		
4061 			#print "\n\n\n"
4062 		
4063 			#verbose printout of fine refinement
4064 			if options.verbose>1 :
4065 				
4066 				for i,j in enumerate(bestfinal): 
4067 					pass
4068 					#print "fine %d. %1.5g\t%s"%(i,j["score"],str(j["xform.align3d"]))
4069 		
		else:	#no fine alignment was run, so keep the coarse results
4070 			bestfinal = bestcoarse
4071 			if options.verbose:
4072 				pass
4073 				#print "\nThere was no fine alignment; therefore, score is", bestfinal[0]['score']
4078 
4079 	
4080 	if options.tweak:
4081 		bestfinal = besttweak
4082 	
4083 	from operator import itemgetter							#If you just sort 'bestfinal' it will be sorted based on the 'coarse' key in the dictionaries of the list
4084 															#because they come before the 'score' key of the dictionary (alphabetically). Should be sorted already, except if there was no --falign
4085 	bestfinal = sorted(bestfinal, key=itemgetter('score'))
4086 
4087 	
4088 	#print "Best final answer determined"
4089 	if options.verbose:
4090 		#print "\nThe best peaks sorted are"	#confirm the peaks are adequately sorted
4091 		#for i in bestfinal:
4092 		#	print i
4093 		pass
4094 	
4095 	if bestfinal[0]["score"] == 1.0e10 and options.falign:
4096 		print "Error: all fine alignments failed for %s. May need to consider altering filter/shrink parameters. Using coarse alignment, but results are likely invalid."%self.options["label"]
4097 	
4098 	if options.verbose: 
4099 		#print "Best %1.5g\t %s"%(bestfinal[0]["score"],str(bestfinal[0]["xform.align3d"]))
4100 		#print "Inside ALIGNMENT function in e2spt_classaverage, done aligning ",label
4101 		pass	
4102 	
4103 	print "\n(e2spt_classaverage)(alignment)Rreturning from alignment from aligning", label	
4104 	
4105 	#print "\n\n\nRRRRRRRRR\n Returning from alignment", 
4106 	#print "bestfinal",bestfinal
4107 	#print "and bestcorase", bestcoarse
4108 	
4109 	'''
4110 	bestfinal was sorted, but should also sort bestcoarse in case it's used independently later
4111 	'''
4112 	
4113 	bestcoarse = sorted(bestcoarse, key=itemgetter('score'))
4114 	
4115 	gc.collect() 	#free up unused memory
4116 	
4117 	return [bestfinal, bestcoarse]
4118 	
4119 
4120 jsonclasses["Align3DTask"]=Align3DTask.from_jsondict
4121 
4122 
4123 def classmx_ptcls(classmx,n):
4124 	"""Scans a classmx file to determine which images are in a specific class. Classmx may be a filename or an EMData object.
4125 	returns a list of integers"""
4126 	print "Classmx and its type received in classmx_ptcls are", classmx, type(classmx)
4127 	if isinstance(classmx,str): 
4128 		classmx=EMData(classmx,0)
4129 	
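	#find_pixels_with_value returns Pixel objects; the y coordinate of each match is the particle's row (index) in the classification matrix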
4130 	plist=[i.y for i in classmx.find_pixels_with_value(float(n))]
4131 	
4132 	return plist
4133 
4134 
4135 	
4136 if __name__ == "__main__":
4137 	main()
4138 	sys.stdout.flush()
